def is_pentagonal(n: int) -> bool:
    """Return True if n is a pentagonal number, i.e. n = m(3m - 1)/2 for some integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find a pair of pentagonal numbers whose sum and
    difference are both pentagonal, and return their difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
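
# Added sanity check (illustrative, not part of the original solution):
# is_pentagonal should accept the first pentagonal numbers P(n) = n(3n - 1)/2
# and reject a non-pentagonal value such as 6.
def _demo_is_pentagonal() -> None:
    first_five = [n * (3 * n - 1) // 2 for n in range(1, 6)]  # [1, 5, 12, 22, 35]
    assert all(is_pentagonal(p) for p in first_five)
    assert not is_pentagonal(6)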
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
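
# Added usage sketch (illustrative; the relative imports above mean this module
# only resolves inside the `transformers` package tree):
#
#     from transformers import RobertaPreLayerNormConfig
#
#     config = RobertaPreLayerNormConfig(num_hidden_layers=6)  # override any default
#     onnx_config = RobertaPreLayerNormOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # dynamic axes for input_ids and attention_mask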
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
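
# Added note on running this module (standard transformers convention): the
# @slow tests above are skipped unless the RUN_SLOW environment variable is
# set, e.g.
#
#     RUN_SLOW=1 python -m pytest tests/models/esm/test_modeling_tf_esm.py
#
# (the path shown is the file's usual location in the repo; adjust if moved).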
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
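
# Added illustration (hypothetical helper, not part of the test suite): a
# minimal greedy merge loop showing why the toy merges above turn "lower" into
# ["l", "o", "w", "er"] — byte-level BPE starts from characters and applies the
# ordered merge rules; "\u0120" plays the role of the leading-space marker.
def _toy_bpe(word, merges):
    symbols = list(word)
    for a, b in merges:  # merge rules, highest priority first
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


assert _toy_bpe("lower", [("e", "r")]) == ["l", "o", "w", "er"]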
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
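
# Added worked example of the shape rule asserted in test_hidden_states_output:
# with image_size=224 the patch embedding divides each side by 4, and each pair
# of stages halves it again, giving sides 56, 56, 28, 28, 14, 14, 7, 7.
def _expected_feature_sides(image_size=224, num_stages=8):
    return [(image_size // 4) // 2 ** (i // 2) for i in range(num_stages)]


assert _expected_feature_sides() == [56, 56, 28, 28, 14, 14, 7, 7]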
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
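
# Added sketch of the mocking convention described in the header comment (the
# names below are illustrative, not from this file): tests that exercise an
# example script patch its get_dataloaders so that the tiny MRPC samples are
# used instead of a real download.
#
#     from unittest import mock
#
#     @mock.patch("checkpointing.get_dataloaders", mocked_dataloaders)
#     def test_checkpointing_runs(self):
#         ...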
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
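
# Added illustration (hypothetical subclass, not part of the library): the
# contract is that __init__ captures the read options and read() materializes
# the data. A trivial in-memory reader might look like this:
#
#     class InMemoryReader(AbstractDatasetReader):
#         def read(self):
#             rows = self.path_or_paths  # here: a list of dicts instead of paths
#             data = {key: [row[key] for row in rows] for key in rows[0]}
#             return Dataset.from_dict(data, features=self.features)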
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
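
# Added manual cross-check (illustrative; the clockwise bit ordering used here
# is one common LBP convention and may differ from lbp.local_binary_value):
def _manual_lbp_code(patch):
    center = patch[1][1]
    neighbors = [patch[0][0], patch[0][1], patch[0][2], patch[1][2],
                 patch[2][2], patch[2][1], patch[2][0], patch[1][0]]
    return sum(int(v >= center) << k for k, v in enumerate(neighbors))


assert _manual_lbp_code([[9, 9, 9], [9, 5, 9], [9, 9, 9]]) == 255  # all neighbors >= center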
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Project Euler 173: count the square laminae that can be formed using up to
    `limit` square tiles (an outer square with a concentric square hole whose
    side has the same parity as the outer side).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
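
# Added exhaustive cross-check (illustrative): enumerate laminae directly by
# outer side and hole side of equal parity and compare against the closed form.
def _brute_force(limit: int) -> int:
    count = 0
    for outer in range(3, limit):
        if outer**2 - (outer - 2) ** 2 > limit:  # even the thinnest lamina is too big
            break
        for hole in range(outer - 2, 0, -2):
            if outer**2 - hole**2 <= limit:
                count += 1
    return count


assert _brute_force(1000) == solution(1000)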
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Project Euler 99: each line of `data_file` holds a "base,exponent" pair;
    return the 1-based line number whose value base**exponent is greatest,
    compared via exponent * log10(base).
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
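
# Added note (illustrative): comparing x * log10(a) ranks pairs the same way as
# comparing a**x directly, because log10 is strictly increasing — without ever
# building the astronomically large integers themselves.
assert (2**11 > 3**7) == (11 * log10(2) > 7 * log10(3))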
"""
Simple linear-algebra helpers: a Vector and a Matrix over floats, plus a few
construction utilities (zero/unit/random vectors, zero/random matrices).
"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
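
# Added usage sketch (illustrative):
if __name__ == "__main__":
    v = Vector([1.0, 2.0, 2.0])
    w = Vector([1.0, 0.0, 0.0])
    print(v + w)                    # (2.0,2.0,2.0)
    print(v * w)                    # dot product: 1.0
    print(v.euclidean_length())     # 3.0

    m = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
    print(m.determinant())          # -2.0
    print(m * Vector([1.0, 1.0]))   # (3.0,7.0)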
def min_path_sum(grid: list) -> int:
    """
    Find the path from the top left to the bottom right of a grid of numbers
    (moving only right or down) with the lowest possible sum, and return that sum.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
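
# Added usage note (illustrative): min_path_sum mutates the grid it is given
# (rows are accumulated in place), so pass a copy when the input must survive.
if __name__ == "__main__":
    from copy import deepcopy

    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(deepcopy(grid)) == 7  # path 1 -> 3 -> 1 -> 1 -> 1
    assert grid[0] == [1, 3, 1]  # original grid left untouched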
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
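
# Added usage sketch (illustrative; the relative imports make this module part
# of the transformers package):
#
#     config = DebertaV2Config()            # defaults mirror deberta-v2-xlarge
#     onnx_config = DebertaV2OnnxConfig(config)
#     print(onnx_config.inputs)             # token_type_ids is included only
#                                           # when config.type_vocab_size > 0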
import math


def is_prime(number: int) -> bool:
    """Check whether `number` is prime by trial division over 6k +/- 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: return the side length of the square spiral at which
    the ratio of primes along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
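
# Added check of the corner enumeration (illustrative): for the spiral ring of
# side j + 2, range(j*j + j + 1, (j + 2)**2, j + 1) yields the three corners
# that can be prime; the fourth corner (j + 2)**2 is a perfect square, so it is
# safe to skip.
assert list(range(3 * 3 + 3 + 1, (3 + 2) ** 2, 3 + 1)) == [13, 17, 21]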
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(a_ , a_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCamelCase :Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase :str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_ , a_ )
# Testing spaces after special tokens
lowerCamelCase :List[str] = """<mask>"""
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(a_ , lstrip=a_ , rstrip=a_ )} ) # mask token has a left space
lowerCamelCase :str = tokenizer.convert_tokens_to_ids(a_ )
lowerCamelCase :int = """Encode <mask> sequence"""
lowerCamelCase :Tuple = """Encode <mask>sequence"""
lowerCamelCase :List[str] = tokenizer.encode(a_ )
lowerCamelCase :Optional[int] = encoded.index(a_ )
lowerCamelCase :Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_ , a_ )
lowerCamelCase :Union[str, Any] = tokenizer.encode(a_ )
lowerCamelCase :Optional[int] = encoded.index(a_ )
lowerCamelCase :Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_ , a_ )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase :Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase :List[str] = """A, <mask> AllenNLP sentence."""
lowerCamelCase :Tuple = tokenizer_r.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
lowerCamelCase :Optional[int] = tokenizer_p.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :int = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :int = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
a_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
a_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def snake_case ( self : List[str] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase :Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase :Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , a_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , a_ )
def snake_case ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase :int = F"{text_of_1_token} {text_of_1_token}"
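# The configurations below toggle add_prefix_space / trim_offsets and check how
# the space before the second word is reflected in its offset mapping.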
lowerCamelCase :Optional[int] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
lowerCamelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :str = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
lowerCamelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
lowerCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
lowerCamelCase :Tuple = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
lowerCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Any = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
lowerCamelCase :List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
lowerCamelCase :Optional[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
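# Generation runs in the background thread while the main thread drains decoded
# text chunks from the streamer as they arrive.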
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 0
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A__ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(__lowerCAmelCase )
def __call__( self : Any , __snake_case : Any , **__snake_case : List[str] ):
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def snake_case ( self : List[str] , **__snake_case : Optional[int] ):
return {}, {}, {}
def snake_case ( self : List[Any] , __snake_case : List[Any] ):
lowerCamelCase :Optional[Any] = load_image(__lowerCAmelCase )
lowerCamelCase :Union[str, Any] = image.size
lowerCamelCase :Optional[int] = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def snake_case ( self : str , __snake_case : str ):
lowerCamelCase :Tuple = self.model(**__lowerCAmelCase )
return model_outputs
def snake_case ( self : Dict , __snake_case : Union[str, Any] ):
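# Postprocess: upsample the predicted depth map back to the original image size,
# then rescale it to 0-255 so it can be returned as a grayscale PIL image.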
lowerCamelCase :Dict = model_outputs.predicted_depth
lowerCamelCase :str = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__lowerCAmelCase )
lowerCamelCase :Optional[int] = prediction.squeeze().cpu().numpy()
lowerCamelCase :Dict = (output * 255 / np.max(__lowerCAmelCase )).astype('''uint8''' )
lowerCamelCase :List[Any] = Image.fromarray(__lowerCAmelCase )
lowerCamelCase :List[str] = {}
lowerCamelCase :str = predicted_depth
lowerCamelCase :Optional[int] = depth
return output_dict
| 707
|
from maths.prime_factors import prime_factors
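# Liouville lambda function: (-1) ** Omega(n), i.e. -1 when n has an odd number
# of prime factors counted with multiplicity and 1 otherwise.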
def _lowerCamelCase(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _lowerCAmelCase ( _UpperCAmelCase ):
_UpperCAmelCase = 'distilbert'
_UpperCAmelCase = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : List[Any] , __snake_case : str=30522 , __snake_case : List[Any]=512 , __snake_case : Any=False , __snake_case : List[str]=6 , __snake_case : str=12 , __snake_case : Optional[Any]=768 , __snake_case : Any=4 * 768 , __snake_case : Optional[int]=0.1 , __snake_case : int=0.1 , __snake_case : Any="gelu" , __snake_case : Tuple=0.0_2 , __snake_case : List[str]=0.1 , __snake_case : Dict=0.2 , __snake_case : str=0 , **__snake_case : List[str] , ):
lowerCamelCase :Dict = vocab_size
lowerCamelCase :str = max_position_embeddings
lowerCamelCase :Optional[Any] = sinusoidal_pos_embds
lowerCamelCase :Optional[Any] = n_layers
lowerCamelCase :List[Any] = n_heads
lowerCamelCase :int = dim
lowerCamelCase :str = hidden_dim
lowerCamelCase :Optional[Any] = dropout
lowerCamelCase :Any = attention_dropout
lowerCamelCase :Dict = activation
lowerCamelCase :Tuple = initializer_range
lowerCamelCase :str = qa_dropout
lowerCamelCase :List[str] = seq_classif_dropout
super().__init__(**lowercase_ , pad_token_id=lowercase_ )
class _lowerCAmelCase ( _UpperCAmelCase ):
@property
def snake_case ( self : List[Any] ):
if self.task == "multiple-choice":
lowerCamelCase :Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase :Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(k , None)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
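# Conversion recipe: build a matching HF config (BiT backbone + ViT encoder),
# load the timm checkpoint, rename/split its weights into the HF layout, then
# verify both models agree on a test image.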
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(image).unsqueeze(0)
lowerCamelCase :str = processor(image , return_tensors='''pt''').pixel_values
# verify that the timm transform and the HF image processor produce the same pixel values
assert torch.allclose(timm_pixel_values , pixel_values)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(pixel_values)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(pixel_values)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(pixel_values)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 709
|
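# Project Euler 2: sum of the even-valued Fibonacci terms that do not exceed n
# (four million by default).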
def solution(n: int = 4_00_00_00) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 0
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
A__ = logging.get_logger(__name__)
# General docstring
A__ = '''PoolFormerConfig'''
# Base docstring
A__ = '''sail/poolformer_s12'''
A__ = [1, 512, 7, 7]
# Image classification docstring
A__ = '''sail/poolformer_s12'''
A__ = '''tabby, tabby cat'''
A__ = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _lowerCamelCase ( a_ : Optional[Any] , a_ : float = 0.0 , a_ : bool = False):
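# Stochastic depth: during training, drop the whole residual path for a random
# subset of samples and rescale the survivors by 1 / keep_prob so the expected
# output is unchanged.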
if drop_prob == 0.0 or not training:
return input
lowerCamelCase :Union[str, Any] = 1 - drop_prob
lowerCamelCase :Tuple = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowerCamelCase :Any = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device)
random_tensor.floor_() # binarize
lowerCamelCase :Optional[Any] = input.div(keep_prob) * random_tensor
return output
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , __snake_case : Optional[float] = None ):
super().__init__()
lowerCamelCase :Tuple = drop_prob
def snake_case ( self : Union[str, Any] , __snake_case : torch.Tensor ):
return drop_path(__lowerCAmelCase , self.drop_prob , self.training )
def snake_case ( self : List[str] ):
return "p={}".format(self.drop_prob )
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict=None ):
super().__init__()
lowerCamelCase :int = patch_size if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
lowerCamelCase :str = stride if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (stride, stride)
lowerCamelCase :Union[str, Any] = padding if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (padding, padding)
lowerCamelCase :Union[str, Any] = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=__lowerCAmelCase )
lowerCamelCase :Union[str, Any] = norm_layer(__lowerCAmelCase ) if norm_layer else nn.Identity()
def snake_case ( self : List[str] , __snake_case : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = self.projection(__lowerCAmelCase )
lowerCamelCase :Tuple = self.norm(__lowerCAmelCase )
return embeddings
class _lowerCAmelCase ( nn.GroupNorm ):
def __init__( self : Tuple , __snake_case : int , **__snake_case : Optional[Any] ):
super().__init__(1 , __lowerCAmelCase , **__lowerCAmelCase )
class _lowerCAmelCase ( nn.Module ):
def __init__( self : int , __snake_case : Optional[Any] ):
super().__init__()
lowerCamelCase :List[str] = nn.AvgPoolad(__lowerCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__lowerCAmelCase )
def snake_case ( self : Optional[Any] , __snake_case : List[Any] ):
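# PoolFormer's "token mixing" is plain average pooling; the input is subtracted
# here because the surrounding block adds it back through the residual connection.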
return self.pool(__lowerCAmelCase ) - hidden_states
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Any ):
super().__init__()
lowerCamelCase :Tuple = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 )
lowerCamelCase :List[Any] = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 )
lowerCamelCase :int = PoolFormerDropPath(__lowerCAmelCase )
if isinstance(config.hidden_act , __lowerCAmelCase ):
lowerCamelCase :Dict = ACTaFN[config.hidden_act]
else:
lowerCamelCase :List[str] = config.hidden_act
def snake_case ( self : Tuple , __snake_case : List[str] ):
lowerCamelCase :Optional[Any] = self.conva(__lowerCAmelCase )
lowerCamelCase :Tuple = self.act_fn(__lowerCAmelCase )
lowerCamelCase :str = self.drop(__lowerCAmelCase )
lowerCamelCase :List[str] = self.conva(__lowerCAmelCase )
lowerCamelCase :List[Any] = self.drop(__lowerCAmelCase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Dict , __snake_case : Dict , __snake_case : Tuple , __snake_case : Union[str, Any] ):
super().__init__()
lowerCamelCase :Tuple = PoolFormerPooling(__lowerCAmelCase )
lowerCamelCase :str = PoolFormerOutput(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase :int = PoolFormerGroupNorm(__lowerCAmelCase )
lowerCamelCase :List[str] = PoolFormerGroupNorm(__lowerCAmelCase )
# Useful for training neural nets
lowerCamelCase :List[Any] = PoolFormerDropPath(__lowerCAmelCase ) if drop_path > 0.0 else nn.Identity()
lowerCamelCase :Any = config.use_layer_scale
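# LayerScale: small learnable per-channel factors applied to each sub-block's
# output before the residual addition, which helps stabilize deeper models.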
if config.use_layer_scale:
lowerCamelCase :Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__lowerCAmelCase) ) , requires_grad=__lowerCAmelCase )
lowerCamelCase :Optional[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__lowerCAmelCase) ) , requires_grad=__lowerCAmelCase )
def snake_case ( self : Dict , __snake_case : Dict ):
if self.use_layer_scale:
lowerCamelCase :Union[str, Any] = self.pooling(self.before_norm(__lowerCAmelCase ) )
lowerCamelCase :List[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowerCamelCase :str = hidden_states + self.drop_path(__lowerCAmelCase )
lowerCamelCase :List[str] = ()
lowerCamelCase :List[str] = self.output(self.after_norm(__lowerCAmelCase ) )
lowerCamelCase :Optional[int] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowerCamelCase :List[Any] = hidden_states + self.drop_path(__lowerCAmelCase )
lowerCamelCase :int = (output,) + outputs
return outputs
else:
lowerCamelCase :Optional[int] = self.drop_path(self.pooling(self.before_norm(__lowerCAmelCase ) ) )
# First residual connection
lowerCamelCase :Optional[int] = pooling_output + hidden_states
lowerCamelCase :Optional[int] = ()
# Second residual connection inside the PoolFormerOutput block
lowerCamelCase :Tuple = self.drop_path(self.output(self.after_norm(__lowerCAmelCase ) ) )
lowerCamelCase :Optional[int] = hidden_states + layer_output
lowerCamelCase :Dict = (output,) + outputs
return outputs
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Any , __snake_case : Dict ):
super().__init__()
lowerCamelCase :Optional[Any] = config
# stochastic depth decay rule
lowerCamelCase :List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowerCamelCase :List[str] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowerCamelCase :str = nn.ModuleList(__lowerCAmelCase )
# Transformer blocks
lowerCamelCase :Optional[Any] = []
lowerCamelCase :int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowerCamelCase :int = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__lowerCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__lowerCAmelCase ) )
lowerCamelCase :Optional[Any] = nn.ModuleList(__lowerCAmelCase )
def snake_case ( self : Any , __snake_case : Dict , __snake_case : Optional[Any]=False , __snake_case : List[str]=True ):
lowerCamelCase :Any = () if output_hidden_states else None
lowerCamelCase :Optional[int] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowerCamelCase , lowerCamelCase :str = layers
# Get patch embeddings from hidden_states
lowerCamelCase :Dict = embedding_layer(__lowerCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(__lowerCAmelCase ):
lowerCamelCase :List[Any] = blk(__lowerCAmelCase )
lowerCamelCase :Any = layer_outputs[0]
if output_hidden_states:
lowerCamelCase :int = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase )
class _lowerCAmelCase ( UpperCAmelCase__ ):
_UpperCAmelCase = PoolFormerConfig
_UpperCAmelCase = 'poolformer'
_UpperCAmelCase = 'pixel_values'
_UpperCAmelCase = True
def snake_case ( self : List[str] , __snake_case : List[str] ):
if isinstance(__lowerCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[Any]=False ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase :Dict = value
A__ = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A__ = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , UpperCAmelCase__ , )
class _lowerCAmelCase ( UpperCAmelCase__ ):
def __init__( self : Any , __snake_case : Union[str, Any] ):
super().__init__(__lowerCAmelCase )
lowerCamelCase :Optional[int] = config
lowerCamelCase :int = PoolFormerEncoder(__lowerCAmelCase )
# Initialize weights and apply final processing
self.post_init()
def snake_case ( self : List[Any] ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case ( self : int , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , ):
lowerCamelCase :Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase :Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowerCamelCase :int = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , )
lowerCamelCase :Dict = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Union[str, Any] , __snake_case : Any ):
super().__init__()
lowerCamelCase :Optional[Any] = nn.Linear(config.hidden_size , config.hidden_size )
def snake_case ( self : int , __snake_case : List[Any] ):
lowerCamelCase :Optional[int] = self.dense(__lowerCAmelCase )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , UpperCAmelCase__ , )
class _lowerCAmelCase ( UpperCAmelCase__ ):
def __init__( self : Dict , __snake_case : Optional[int] ):
super().__init__(__lowerCAmelCase )
lowerCamelCase :Tuple = config.num_labels
lowerCamelCase :Tuple = PoolFormerModel(__lowerCAmelCase )
# Final norm
lowerCamelCase :Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowerCamelCase :Union[str, Any] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case ( self : Union[str, Any] , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.LongTensor] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , ):
lowerCamelCase :Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase :List[Any] = self.poolformer(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , )
lowerCamelCase :str = outputs[0]
lowerCamelCase :str = self.classifier(self.norm(__lowerCAmelCase ).mean([-2, -1] ) )
lowerCamelCase :str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase :List[str] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase :Tuple = '''single_label_classification'''
else:
lowerCamelCase :Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCamelCase :Optional[int] = MSELoss()
if self.num_labels == 1:
lowerCamelCase :Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase :int = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase :Optional[Any] = CrossEntropyLoss()
lowerCamelCase :List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase :str = BCEWithLogitsLoss()
lowerCamelCase :Dict = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
lowerCamelCase :Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
A__ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
A__ = {"""facebook/blenderbot_small-90M""": 512}
def _lowerCamelCase ( a_ : List[str]):
lowerCamelCase :Tuple = set()
lowerCamelCase :str = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCamelCase :List[Any] = char
lowerCamelCase :Any = set(_snake_case)
return pairs
class _lowerCAmelCase ( __UpperCAmelCase ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , __snake_case : Any , __snake_case : Optional[int] , __snake_case : List[str]="__start__" , __snake_case : str="__end__" , __snake_case : Union[str, Any]="__unk__" , __snake_case : Optional[int]="__null__" , **__snake_case : Union[str, Any] , ):
super().__init__(unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , **UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase :Dict = json.load(UpperCAmelCase_ )
lowerCamelCase :int = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase :int = merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase :Optional[Any] = [tuple(merge.split() ) for merge in merges]
lowerCamelCase :Union[str, Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCamelCase :Tuple = {}
@property
def snake_case ( self : Tuple ):
return len(self.encoder )
def snake_case ( self : List[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case ( self : str , __snake_case : Optional[Any] ):
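# BlenderbotSmall pre-tokenization: pad punctuation with spaces, isolate
# apostrophes, collapse repeated whitespace and spell newlines as __newln__
# before applying BPE to each word.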
if token in self.cache:
return self.cache[token]
lowerCamelCase :List[str] = re.sub('''([.,!?()])''' , R''' \1''' , UpperCAmelCase_ )
lowerCamelCase :Optional[Any] = re.sub('''(\')''' , R''' \1 ''' , UpperCAmelCase_ )
lowerCamelCase :Optional[int] = re.sub(R'''\s{2,}''' , ''' ''' , UpperCAmelCase_ )
if "\n" in token:
lowerCamelCase :List[Any] = token.replace('''\n''' , ''' __newln__''' )
lowerCamelCase :str = token.split(''' ''' )
lowerCamelCase :Dict = []
for token in tokens:
if not len(UpperCAmelCase_ ):
continue
lowerCamelCase :Tuple = token.lower()
lowerCamelCase :List[Any] = tuple(UpperCAmelCase_ )
lowerCamelCase :Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCamelCase :Union[str, Any] = get_pairs(UpperCAmelCase_ )
if not pairs:
words.append(UpperCAmelCase_ )
continue
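# Greedy BPE: repeatedly merge the adjacent symbol pair with the lowest merge
# rank (i.e. learned earliest) until no learned merge applies to the word.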
while True:
lowerCamelCase :List[Any] = min(UpperCAmelCase_ , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase , lowerCamelCase :int = bigram
lowerCamelCase :Any = []
lowerCamelCase :List[Any] = 0
while i < len(UpperCAmelCase_ ):
try:
lowerCamelCase :Optional[Any] = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
new_word.extend(word[i:j] )
lowerCamelCase :str = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase :Tuple = tuple(UpperCAmelCase_ )
lowerCamelCase :Union[str, Any] = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowerCamelCase :Union[str, Any] = get_pairs(UpperCAmelCase_ )
lowerCamelCase :str = '''@@ '''.join(UpperCAmelCase_ )
lowerCamelCase :List[str] = word[:-4]
lowerCamelCase :List[Any] = word
words.append(UpperCAmelCase_ )
return " ".join(UpperCAmelCase_ )
def snake_case ( self : Dict , __snake_case : Dict ):
lowerCamelCase :Dict = []
lowerCamelCase :Optional[int] = re.findall(R'''\S+\n?''' , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def snake_case ( self : List[Any] , __snake_case : Tuple ):
lowerCamelCase :List[str] = token.lower()
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def snake_case ( self : Optional[Any] , __snake_case : Dict ):
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def snake_case ( self : str , __snake_case : Optional[int] ):
lowerCamelCase :List[str] = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def snake_case ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase :Any = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Union[str, Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + '''\n''' )
lowerCamelCase :List[Any] = 0
with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase :int = token_index
writer.write(''' '''.join(UpperCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
| 711
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned, where the first argument is the
# number of nodes in the previous layer and the second argument is the
# number of nodes in the next layer.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , output : numpy.ndarray , iterations : int , give_loss : bool ):
for iteration in range(1 , iterations + 1 ):
self.predicted_output = self.feedforward()
self.back_propagation()
if give_loss:
loss = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , input_arr : numpy.ndarray ):
self.array = input_arr
self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid ( value : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative ( value : numpy.ndarray):
return (value) * (1 - (value))
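# Editor's sanity sketch (added for illustration): sigmoid_derivative expects
# the *activation* value sigmoid(x), not x itself, so the analytic derivative
# must agree with a finite-difference estimate of d/dx sigmoid(x):
def _check_sigmoid_derivative(x=0.3, eps=1e-6):
    analytic = sigmoid_derivative(sigmoid(x))
    numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    assert abs(analytic - numeric) < 1e-6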
def example( ):
test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.float64 , )
# True output values for the given input values.
output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64)
# Calling neural network class.
neural_network = TwoHiddenLayerNeuralNetwork(
input_array=test_input , output_array=output)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=output , iterations=10 , give_loss=False)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64))
if __name__ == "__main__":
example()
| 49
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class _lowerCAmelCase ( BaseOutput ):
_UpperCAmelCase = 4_2
class _lowerCAmelCase ( nn.Module ):
def __init__( self : List[Any] , in_channels : int=3 , out_channels : Optional[int]=3 , down_block_types : Union[str, Any]=("DownEncoderBlock2D",) , block_out_channels : Tuple=(64,) , layers_per_block : List[str]=2 , norm_num_groups : Union[str, Any]=32 , act_fn : str="silu" , double_z : int=True , ):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = torch.nn.Conv2d(
in_channels , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
self.mid_block = None
self.down_blocks = nn.ModuleList([] )
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types ):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels ) - 1
down_block = get_down_block(
down_block_type , num_layers=self.layers_per_block , in_channels=input_channel , out_channels=output_channel , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=None , )
self.down_blocks.append(down_block )
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=None , )
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=norm_num_groups , eps=1e-6 )
self.conv_act = nn.SiLU()
conv_out_channels = 2 * out_channels if double_z else out_channels
self.conv_out = nn.Conv2d(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
self.gradient_checkpointing = False
def snake_case ( self : Tuple , x : List[str] ):
sample = x
sample = self.conv_in(sample )
if self.training and self.gradient_checkpointing:
def create_custom_forward(module : Union[str, Any] ):
def custom_forward(*inputs : List[str] ):
return module(*inputs )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(down_block ) , sample , use_reentrant=False )
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , sample , use_reentrant=False )
else:
for down_block in self.down_blocks:
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block ) , sample )
# middle
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , sample )
else:
# down
for down_block in self.down_blocks:
sample = down_block(sample )
# middle
sample = self.mid_block(sample )
# post-process
sample = self.conv_norm_out(sample )
sample = self.conv_act(sample )
sample = self.conv_out(sample )
return sample
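# Editor's note (illustrative, not part of the original file): the closure in
# forward() exists because torch.utils.checkpoint needs a positional-args
# callable; checkpointing re-runs the module on the backward pass instead of
# storing activations. A minimal standalone version of the pattern:
def _checkpoint_demo(module, sample):
    def custom_forward(*inputs):
        return module(*inputs)
    return torch.utils.checkpoint.checkpoint(custom_forward, sample)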
class _lowerCAmelCase ( nn.Module ):
def __init__( self : List[str] , in_channels : int=3 , out_channels : Optional[Any]=3 , up_block_types : Any=("UpDecoderBlock2D",) , block_out_channels : Union[str, Any]=(64,) , layers_per_block : List[Any]=2 , norm_num_groups : Optional[int]=32 , act_fn : List[Any]="silu" , norm_type : Optional[Any]="group" , ):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = nn.Conv2d(
in_channels , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
self.mid_block = None
self.up_blocks = nn.ModuleList([] )
temb_channels = in_channels if norm_type == '''spatial''' else None
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=temb_channels , )
# up
reversed_block_out_channels = list(reversed(block_out_channels ) )
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types ):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
is_final_block = i == len(block_out_channels ) - 1
up_block = get_up_block(
up_block_type , num_layers=self.layers_per_block + 1 , in_channels=prev_output_channel , out_channels=output_channel , prev_output_channel=None , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=temb_channels , resnet_time_scale_shift=norm_type , )
self.up_blocks.append(up_block )
prev_output_channel = output_channel
# out
if norm_type == "spatial":
self.conv_norm_out = SpatialNorm(block_out_channels[0] , temb_channels )
else:
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=norm_num_groups , eps=1e-6 )
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0] , out_channels , 3 , padding=1 )
self.gradient_checkpointing = False
def snake_case ( self : Optional[int] , z : Any , latent_embeds : Dict=None ):
sample = z
sample = self.conv_in(sample )
upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(module : Optional[int] ):
def custom_forward(*inputs : Dict ):
return module(*inputs )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , sample , latent_embeds , use_reentrant=False )
sample = sample.to(upscale_dtype )
# up
for up_block in self.up_blocks:
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(up_block ) , sample , latent_embeds , use_reentrant=False )
else:
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , sample , latent_embeds )
sample = sample.to(upscale_dtype )
# up
for up_block in self.up_blocks:
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , sample , latent_embeds )
else:
# middle
sample = self.mid_block(sample , latent_embeds )
sample = sample.to(upscale_dtype )
# up
for up_block in self.up_blocks:
sample = up_block(sample , latent_embeds )
# post-process
if latent_embeds is None:
sample = self.conv_norm_out(sample )
else:
sample = self.conv_norm_out(sample , latent_embeds )
sample = self.conv_act(sample )
sample = self.conv_out(sample )
return sample
class _lowerCAmelCase ( nn.Module ):
def __init__( self : int , n_e : Union[str, Any] , vq_embed_dim : Dict , beta : List[Any] , remap : Optional[Any]=None , unknown_index : Tuple="random" , sane_index_shape : Dict=False , legacy : Optional[Any]=True ):
super().__init__()
self.n_e = n_e
self.vq_embed_dim = vq_embed_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
self.remap = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(
F"Remapping {self.n_e} indices to {self.re_embed} indices. "
F"Using {self.unknown_index} for unknown indices." )
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def snake_case ( self : Tuple , inds : Union[str, Any] ):
ishape = inds.shape
assert len(ishape ) > 1
inds = inds.reshape(ishape[0] , -1 )
used = self.used.to(inds )
match = (inds[:, :, None] == used[None, None, ...]).long()
new = match.argmax(-1 )
unknown = match.sum(2 ) < 1
if self.unknown_index == "random":
new[unknown] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
new[unknown] = self.unknown_index
return new.reshape(ishape )
def snake_case ( self : List[str] , inds : Dict ):
ishape = inds.shape
assert len(ishape ) > 1
inds = inds.reshape(ishape[0] , -1 )
used = self.used.to(inds )
if self.re_embed > self.used.shape[0]: # extra token
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
back = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , inds )
return back.reshape(ishape )
def snake_case ( self : int , z : Dict ):
# reshape z -> (batch, height, width, channel) and flatten
z = z.permute(0 , 2 , 3 , 1 ).contiguous()
z_flattened = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
z_q = self.embedding(min_encoding_indices ).view(z.shape )
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices )
min_encoding_indices = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
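# Editor's illustrative sketch: the argmin-over-cdist lookup in forward() is a
# plain nearest-neighbour search against the codebook. Standalone, with a tiny
# hypothetical codebook:
def _nearest_codebook_demo():
    codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    latents = torch.tensor([[0.1, -0.2], [0.9, 1.2]])
    indices = torch.argmin(torch.cdist(latents, codebook), dim=1)  # tensor([0, 1])
    return codebook[indices]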
def snake_case ( self : Union[str, Any] , indices : Optional[Any] , shape : Any ):
if self.remap is not None:
indices = indices.reshape(shape[0] , -1 ) # add batch axis
indices = self.unmap_to_all(indices )
indices = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
z_q = self.embedding(indices )
if shape is not None:
z_q = z_q.view(shape )
# reshape back to match original input shape
z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _lowerCAmelCase ( object ):
def __init__( self : Union[str, Any] , parameters : Any , deterministic : List[str]=False ):
self.parameters = parameters
self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
self.logvar = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar )
self.var = torch.exp(self.logvar )
if self.deterministic:
self.var = self.std = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Any , generator : Optional[torch.Generator] = None ):
sample = randn_tensor(
self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
x = self.mean + self.std * sample
return x
def snake_case ( self : str , other : str=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : List[Any] , sample : Tuple , dims : str=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
logtwopi = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
def snake_case ( self : List[str] ):
return self.mean
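# Editor's illustrative sketch: sample() above is the reparameterization trick,
# x = mean + std * eps with eps ~ N(0, I), which keeps gradients flowing
# through mean and logvar. Minimal standalone equivalent:
def _reparameterize_demo(mean, logvar, generator=None):
    std = torch.exp(0.5 * logvar)
    eps = torch.randn(mean.shape, generator=generator)
    return mean + std * eps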
| 712
|
def _lowerCamelCase ( a : str , b : str):
n = len(a)
m = len(b)
dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
dp[0][0] = True
for i in range(n):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
dp[i + 1][j + 1] = True
if a[i].islower():
dp[i + 1][j] = True
return dp[n][m]
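# Editor's illustrative usage: the DP above decides whether string a can be
# turned into b by capitalizing some of its lowercase letters and deleting the
# remaining lowercase letters ("abbreviation" matching):
def _abbr_demo():
    assert _lowerCamelCase("daBcd", "ABC") is True
    assert _lowerCamelCase("dBcd", "ABC") is False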
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
_UpperCAmelCase = 4_2
_UpperCAmelCase = 4_2
_UpperCAmelCase = 4_2
@dataclass
class _lowerCAmelCase :
_UpperCAmelCase = 4_2
_UpperCAmelCase = 4_2
_UpperCAmelCase = None
_UpperCAmelCase = None
class _lowerCAmelCase ( Enum ):
_UpperCAmelCase = 'train'
_UpperCAmelCase = 'dev'
_UpperCAmelCase = 'test'
class _lowerCAmelCase :
@staticmethod
def snake_case ( __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
raise NotImplementedError
@staticmethod
def snake_case ( __snake_case : List[Any] ):
raise NotImplementedError
@staticmethod
def snake_case ( examples : List[str] , label_list : Union[str, Any] , max_seq_length : Union[str, Any] , tokenizer : Optional[Any] , cls_token_at_end : Any=False , cls_token : Tuple="[CLS]" , cls_token_segment_id : str=1 , sep_token : str="[SEP]" , sep_token_extra : Optional[Any]=False , pad_on_left : List[Any]=False , pad_token : Union[str, Any]=0 , pad_token_segment_id : Optional[int]=0 , pad_token_label_id : List[str]=-100 , sequence_a_segment_id : Union[str, Any]=0 , mask_padding_with_zero : int=True , ):
label_map = {label: i for i, label in enumerate(label_list )}
features = []
for ex_index, example in enumerate(examples ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' , ex_index , len(examples ) )
tokens = []
label_ids = []
for word, label in zip(example.words , example.labels ):
word_tokens = tokenizer.tokenize(word )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(word_tokens ) > 0:
tokens.extend(word_tokens )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_special_tokens_to_add()
if len(tokens ) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids )
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids ) == max_seq_length
assert len(input_mask ) == max_seq_length
assert len(segment_ids ) == max_seq_length
assert len(label_ids ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
features.append(
InputFeatures(
input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
return features
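# Editor's illustrative sketch of the padding logic above, on hypothetical toy
# ids (pad_token=0 and mask_padding_with_zero=True are assumed):
def _pad_demo(input_ids=[101, 7, 8, 102], max_seq_length=6, pad_on_left=False):
    input_mask = [1] * len(input_ids)
    padding_length = max_seq_length - len(input_ids)
    if pad_on_left:
        return [0] * padding_length + input_ids, [0] * padding_length + input_mask
    return input_ids + [0] * padding_length, input_mask + [0] * padding_length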
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _lowerCAmelCase ( Dataset ):
_UpperCAmelCase = 4_2
_UpperCAmelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self : Any , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : str , __snake_case : List[Any] , __snake_case : Dict = None , __snake_case : Union[str, Any]=False , __snake_case : List[str] = Split.train , ):
lowerCamelCase :List[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_SCREAMING_SNAKE_CASE ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase :Dict = cached_features_file + '''.lock'''
with FileLock(_SCREAMING_SNAKE_CASE ):
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(F"Loading features from cached file {cached_features_file}" )
lowerCamelCase :Any = torch.load(_SCREAMING_SNAKE_CASE )
else:
logger.info(F"Creating features from dataset file at {data_dir}" )
lowerCamelCase :int = token_classification_task.read_examples_from_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
lowerCamelCase :List[Any] = token_classification_task.convert_examples_to_features(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F"Saving features into cached file {cached_features_file}" )
torch.save(self.features , _SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , __snake_case : Any ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _lowerCAmelCase :
_UpperCAmelCase = 4_2
_UpperCAmelCase = -1_0_0
def __init__( self : List[str] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] = None , __snake_case : List[Any]=False , __snake_case : str = Split.train , ):
lowerCamelCase :Union[str, Any] = token_classification_task.read_examples_from_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
lowerCamelCase :List[Any] = token_classification_task.convert_examples_to_features(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
self.dataset = tf.data.Dataset.from_generator(
gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
self.dataset = tf.data.Dataset.from_generator(
gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Union[str, Any] ):
return len(self.features )
def __getitem__( self : Optional[int] , __snake_case : List[Any] ):
return self.features[i]
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester :
def __init__( self : Any , parent : Optional[int] , batch_size : int=13 , image_size : str=[30, 30] , patch_size : Tuple=2 , num_channels : Optional[Any]=3 , is_training : int=True , use_labels : Tuple=True , hidden_size : List[Any]=32 , num_hidden_layers : int=5 , num_attention_heads : Optional[Any]=4 , intermediate_size : Union[str, Any]=37 , hidden_act : str="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : List[Any]=0.1 , type_sequence_label_size : Union[str, Any]=10 , initializer_range : str=0.0_2 , num_labels : Union[str, Any]=3 , scope : Union[str, Any]=None , n_targets : List[str]=8 , num_detection_tokens : Any=10 , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.n_targets = n_targets
self.num_detection_tokens = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size ):
target = {}
target['''class_labels'''] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=torch_device )
target['''boxes'''] = torch.rand(self.n_targets , 4 , device=torch_device )
labels.append(target )
config = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
self.model_tester = YolosModelTester(self )
self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
out_len = len(outputs )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
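# Editor's illustrative check of the sequence-length bookkeeping used by the
# tester above: expected_seq_len = num_patches + 1 ([CLS]) + num_detection_tokens.
def _expected_seq_len(image_size=(30, 30), patch_size=2, num_detection_tokens=10):
    num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
    return num_patches + 1 + num_detection_tokens  # 225 + 1 + 10 = 236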
| 49
| 0
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :Optional[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowerCamelCase :int = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def snake_case ( self : Dict ):
lowerCamelCase :Optional[int] = F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
lowerCamelCase :List[Any] = [sys.executable] + distributed_args
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
A__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _lowerCAmelCase ( nn.Module ):
def __init__( self : List[Any] , args : List[Any] ):
super().__init__()
model = torchvision.models.resnet152(pretrained=True )
modules = list(model.children() )[:-2]
self.model = nn.Sequential(*modules )
self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def snake_case ( self : Union[str, Any] , input_modal : Dict ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(input_modal ) )
out = torch.flatten(out , start_dim=2 )
out = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class _lowerCAmelCase ( Dataset ):
def __init__( self : Any , data_path : List[Any] , tokenizer : Optional[Any] , transforms : Tuple , labels : Optional[Any] , max_seq_length : Optional[int] ):
self.data = [json.loads(l ) for l in open(data_path )]
self.data_dir = os.path.dirname(data_path )
self.tokenizer = tokenizer
self.labels = labels
self.n_classes = len(labels )
self.max_seq_length = max_seq_length
self.transforms = transforms
def __len__( self : str ):
return len(self.data )
def __getitem__( self : List[str] , index : Tuple ):
sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=True ) )
start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
sentence = sentence[: self.max_seq_length]
label = torch.zeros(self.n_classes )
label[[self.labels.index(tgt ) for tgt in self.data[index]['''label''']]] = 1
image = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def snake_case ( self : Any ):
label_freqs = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def _lowerCamelCase ( batch : Optional[Any]):
lens = [len(row['''sentence''']) for row in batch]
bsz , max_seq_len = len(batch), max(lens)
mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long)
text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(batch , lens)):
text_tensor[i_batch, :length] = input_row['''sentence''']
mask_tensor[i_batch, :length] = 1
img_tensor = torch.stack([row['''image'''] for row in batch])
tgt_tensor = torch.stack([row['''label'''] for row in batch])
img_start_token = torch.stack([row['''image_start_token'''] for row in batch])
img_end_token = torch.stack([row['''image_end_token'''] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
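# Editor's illustrative sketch: the collate above pads variable-length
# sentences to the batch maximum and builds a matching attention mask:
def _collate_demo():
    rows = [torch.tensor([5, 6]), torch.tensor([7, 8, 9])]
    lens = [len(r) for r in rows]
    text = torch.zeros(len(rows), max(lens), dtype=torch.long)
    mask = torch.zeros(len(rows), max(lens), dtype=torch.long)
    for i, (row, length) in enumerate(zip(rows, lens)):
        text[i, :length] = row
        mask[i, :length] = 1
    return text, mask  # mask == [[1, 1, 0], [1, 1, 1]]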
def _lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(2_56),
transforms.CenterCrop(2_24),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
])
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( AbstractArchiveFileSystem ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
self.dir_cache = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , path : str , mode : str = "rb" , block_size : int=None , autocommit : Optional[int]=True , cache_options : str=None , **kwargs : str , ):
path = self._strip_protocol(path )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_enter = self.file.__enter__
class WrappedFile :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*args : Optional[int] , **kwargs : str ):
return WrappedFile(_enter(*args , **kwargs ) )
self.file.__enter__ = fixed_enter
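# Editor's illustrative usage (the local path below is hypothetical): these
# filesystems let fsspec chain protocols, so a compressed file can be read
# transparently, e.g. gzip://file.txt::file:///tmp/file.txt.gz
def _gzip_read_demo(path="/tmp/file.txt.gz"):
    with fsspec.open(f"gzip://file.txt::file://{path}", "rt") as f:
        return f.read()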
| 49
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22InpaintPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = KandinskyV22InpaintPipeline
_UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCAmelCase = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCAmelCase = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase = False
@property
def snake_case ( self : Optional[int] ):
return 32
@property
def snake_case ( self : List[str] ):
return 32
@property
def snake_case ( self : List[Any] ):
return self.time_input_dim
@property
def snake_case ( self : Any ):
return self.time_input_dim * 4
@property
def snake_case ( self : str ):
return 100
@property
def snake_case ( self : List[str] ):
torch.manual_seed(0 )
lowerCamelCase :List[Any] = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase :Optional[int] = UNet2DConditionModel(**A_ )
return model
@property
def snake_case ( self : Optional[int] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self : Dict ):
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :List[Any] = self.dummy_unet
lowerCamelCase :Optional[Any] = self.dummy_movq
lowerCamelCase :Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=A_ , )
lowerCamelCase :Dict = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case ( self : int , __snake_case : Union[str, Any] , __snake_case : int=0 ):
lowerCamelCase :int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
lowerCamelCase :Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
lowerCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
lowerCamelCase :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase :str = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
lowerCamelCase :Dict = np.ones((64, 64) , dtype=np.floataa )
lowerCamelCase :Union[str, Any] = 0
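# The zeroed patch marks one region of the binary mask; which value ("0" vs "1")
# denotes the area to repaint depends on the pipeline's mask convention, so check
# the pipeline docs before reusing this pattern.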
if str(A_ ).startswith('''mps''' ):
lowerCamelCase :Tuple = torch.manual_seed(A_ )
else:
lowerCamelCase :Optional[int] = torch.Generator(device=A_ ).manual_seed(A_ )
lowerCamelCase :Optional[int] = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def snake_case ( self : List[str] ):
lowerCamelCase :int = "cpu"
lowerCamelCase :Dict = self.get_dummy_components()
lowerCamelCase :str = self.pipeline_class(**A_ )
lowerCamelCase :Any = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase :int = pipe(**self.get_dummy_inputs(A_ ) )
lowerCamelCase :List[Any] = output.images
lowerCamelCase :List[str] = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
lowerCamelCase :Optional[int] = image[0, -3:, -3:, -1]
lowerCamelCase :int = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
lowerCamelCase :List[str] = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def snake_case ( self : Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : List[str] ):
lowerCamelCase :Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
lowerCamelCase :Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCamelCase :Tuple = np.ones((768, 768) , dtype=np.floataa )
lowerCamelCase :str = 0
lowerCamelCase :Tuple = "a hat"
lowerCamelCase :Tuple = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
lowerCamelCase :Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa )
lowerCamelCase :int = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
lowerCamelCase :Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase :List[Any] = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCamelCase :Union[str, Any] = pipeline(
image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
lowerCamelCase :Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
A__ = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _lowerCamelCase ( a_ : Any):
lowerCamelCase :List[Any] = list(s_dict.keys())
for key in keys:
lowerCamelCase :List[str] = R'''.*/layers_(\d+)'''
lowerCamelCase :List[Any] = key
if re.match(a_ , a_):
lowerCamelCase :Tuple = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , a_)
lowerCamelCase :str = R'''(encoder|decoder)\/'''
if re.match(a_ , a_):
lowerCamelCase :Tuple = re.match(a_ , a_).groups()
if groups[0] == "encoder":
lowerCamelCase :Optional[Any] = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , a_)
lowerCamelCase :Any = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , a_)
elif groups[0] == "decoder":
lowerCamelCase :List[str] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , a_)
lowerCamelCase :Dict = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , a_)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCamelCase :Any = new_key.replace(a_ , a_)
print(F"{key} -> {new_key}")
lowerCamelCase :Union[str, Any] = s_dict.pop(a_)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase :Any = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase :int = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
lowerCamelCase :List[str] = s_dict[key].shape[0]
lowerCamelCase :str = s_dict[key]
for idx in range(a_):
lowerCamelCase :Any = expert_weights[idx]
print(F"{key} -> {key.replace('expert/' , 'experts/expert_/')}")
s_dict.pop(a_)
return s_dict
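# Hand-worked example of the renaming above (hypothetical key; real checkpoints
# contain many more entries):
#   "encoder/layers_0/attention/key/kernel"
#     -> "encoder/block/0/layer/0/SelfAttention/k/kernel"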
A__ = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any]):
import regex as re
with open(a_ , '''r''') as f:
lowerCamelCase :Any = f.read()
lowerCamelCase :Tuple = re.findall(R'''(.*) = ([0-9.]*)''' , a_)
lowerCamelCase :List[str] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCamelCase :Any = float(a_) if '''.''' in value else int(a_)
lowerCamelCase :Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , a_)[0]
lowerCamelCase :Optional[int] = str(activation[1])
lowerCamelCase :Tuple = num_experts
lowerCamelCase :Any = SwitchTransformersConfig(**a_)
return config
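# Example: a gin line such as "NUM_HEADS = 12" is captured by the regex above
# and, via GIN_TO_CONFIG_MAPPING, stored as num_heads=12 (parsed as int, since
# the value contains no ".").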
def _lowerCamelCase ( a_ : Dict , a_ : Optional[int] , a_ : List[str]=None , a_ : Tuple="./" , a_ : Tuple=8):
print(F"Loading flax weights from : {flax_checkpoint_path}")
lowerCamelCase :str = checkpoints.load_tax_checkpoint(a_)
if gin_file is not None:
lowerCamelCase :List[str] = convert_gin_to_config(a_ , a_)
else:
lowerCamelCase :Any = SwitchTransformersConfig.from_pretrained(a_)
lowerCamelCase :List[Any] = SwitchTransformersForConditionalGeneration(a_)
lowerCamelCase :str = flax_params['''target''']
lowerCamelCase :List[str] = flatten_dict(a_ , sep='''/''')
lowerCamelCase :Union[str, Any] = rename_keys(a_)
lowerCamelCase :Union[str, Any] = unflatten_dict(a_ , sep='''/''')
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(a_ , a_)
print(F"Save PyTorch model to {pytorch_dump_path}")
pt_model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
A__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
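# The pattern above defers the heavy submodule imports until an attribute is
# first accessed. A rough standalone sketch of the idea (PEP 562 module-level
# __getattr__, not the actual _LazyModule implementation):
#
#   import importlib
#   def __getattr__(name):
#       submodule = _name_to_submodule[name]  # hypothetical lookup table
#       return getattr(importlib.import_module("." + submodule, __package__), name)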
| 49
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowerCamelCase ( ):
lowerCamelCase :Dict = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
))
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__lowerCAmelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''')
# positional
parser.add_argument(
'''training_script''' , type=__lowerCAmelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__lowerCAmelCase)
return parser.parse_args()
def _lowerCamelCase ( ):
lowerCamelCase :List[str] = parse_args()
# Import training_script as a module.
lowerCamelCase :Optional[Any] = Path(args.training_script)
sys.path.append(str(script_fpath.parent.resolve()))
lowerCamelCase :Any = script_fpath.stem
lowerCamelCase :Tuple = importlib.import_module(__lowerCAmelCase)
# Patch sys.argv
lowerCamelCase :Union[str, Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
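# For example, invoking this launcher with `--num_cores 8 train.py --lr 1e-4`
# patches the argument vector to ["train.py", "--lr", "1e-4", "--tpu_num_cores", "8"]
# before xmp.spawn hands control to the training script's _mp_fn.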
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 718
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 0
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A__ = logging.getLogger(__name__)
A__ = """Hello world! cécé herlolip"""
A__ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any]):
lowerCamelCase :List[str] = BertAbsConfig(
temp_dir='''.''' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='''bert''' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
lowerCamelCase :Optional[int] = torch.load(lowercase__ , lambda storage , loc: storage)
lowerCamelCase :Union[str, Any] = AbsSummarizer(lowercase__ , torch.device('''cpu''') , lowercase__)
original.eval()
lowerCamelCase :Dict = BertAbsSummarizer(lowercase__ , torch.device('''cpu'''))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''')
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''')
lowerCamelCase :List[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''')
# prepare the model inputs
lowerCamelCase :Union[str, Any] = tokenizer.encode('''This is sample éàalj\'-.''')
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__)))
lowerCamelCase :Any = torch.tensor(lowercase__).unsqueeze(0)
lowerCamelCase :Union[str, Any] = tokenizer.encode('''This is sample 3 éàalj\'-.''')
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__)))
lowerCamelCase :str = torch.tensor(lowercase__).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
lowerCamelCase :Optional[int] = encoder_input_ids
lowerCamelCase :List[str] = decoder_input_ids
lowerCamelCase :Tuple = None
lowerCamelCase :int = None
lowerCamelCase :List[str] = None
lowerCamelCase :Optional[Any] = None
lowerCamelCase :Optional[int] = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowerCamelCase :Dict = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__)[0]
lowerCamelCase :Union[str, Any] = original.generator(lowercase__)
lowerCamelCase :List[Any] = new_model(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__)[0]
lowerCamelCase :Dict = new_model.generator(lowercase__)
lowerCamelCase :Dict = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print('''Maximum absolute difference between model outputs: {:.2f}'''.format(lowercase__))
lowerCamelCase :Optional[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(lowercase__))
lowerCamelCase :List[str] = torch.allclose(lowercase__ , lowercase__ , atol=1e-3)
if are_identical:
logging.info('''all weights are equal up to 1e-3''')
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''')
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''')
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''')
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
A__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 719
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda x , y: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
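# Worked example (hand-computed, not program output):
#   "5 6 2 + * 12 4 / -"  evaluates as  5 * (6 + 2) - 12 / 4  =  40 - 3  =  37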
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowerCAmelCase :
def __init__( self : Optional[int] , __snake_case : str = "cpu" , __snake_case : str = "openai/clip-vit-large-patch14" ):
lowerCamelCase :List[str] = device
lowerCamelCase :Tuple = CLIPTokenizerFast.from_pretrained(__a )
lowerCamelCase :Dict = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
lowerCamelCase :Dict = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
lowerCamelCase :Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCamelCase :List[Any] = torchvision.transforms.Resize(224 )
lowerCamelCase :Dict = torchvision.transforms.CenterCrop(224 )
def snake_case ( self : Tuple , __snake_case : str ):
lowerCamelCase :List[str] = self.resize(__a )
lowerCamelCase :str = self.center_crop(__a )
lowerCamelCase :int = self.normalize(__a )
return images
def __call__( self : int , __snake_case : Optional[Any]=None , __snake_case : Dict=None , **__snake_case : int ):
lowerCamelCase :str = self.tokenizer(text=__a , **__a )
lowerCamelCase :Optional[int] = self.preprocess_img(__a )
lowerCamelCase :str = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowerCAmelCase ( nn.Module ):
def __init__( self : List[str] , __snake_case : Optional[Any]=10 , __snake_case : Dict=0.0_1 , __snake_case : Optional[Any]=None , __snake_case : List[Any]=None , __snake_case : Any=None , __snake_case : int=None , __snake_case : Tuple=None , __snake_case : int=None , __snake_case : Union[str, Any]=False , __snake_case : str=True , __snake_case : List[str]="image" , __snake_case : Any=True , __snake_case : Dict=False , __snake_case : str=False , __snake_case : Any=False , ):
super().__init__()
lowerCamelCase :Any = None
lowerCamelCase :Optional[int] = device if device else get_device()
if vqgan:
lowerCamelCase :List[Any] = vqgan
else:
lowerCamelCase :Union[str, Any] = load_vqgan(self.device , conf_path=__a , ckpt_path=__a )
self.vqgan.eval()
if clip:
lowerCamelCase :str = clip
else:
lowerCamelCase :Tuple = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
lowerCamelCase :Any = ProcessorGradientFlow(device=self.device )
lowerCamelCase :List[Any] = iterations
lowerCamelCase :str = lr
lowerCamelCase :Optional[int] = log
lowerCamelCase :List[Any] = make_grid
lowerCamelCase :Optional[int] = return_val
lowerCamelCase :Optional[int] = quantize
lowerCamelCase :List[Any] = self.vqgan.decoder.z_shape
def snake_case ( self : int , __snake_case : Optional[Any]=None , __snake_case : List[Any]=None , __snake_case : List[Any]=5 , __snake_case : List[str]=True ):
lowerCamelCase :Optional[int] = []
if output_path is None:
lowerCamelCase :Optional[int] = """./animation.gif"""
if input_path is None:
lowerCamelCase :List[str] = self.save_path
lowerCamelCase :str = sorted(glob(input_path + '''/*''' ) )
if not len(__a ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(__a ) == 1:
print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''' )
lowerCamelCase :List[Any] = total_duration / len(__a )
lowerCamelCase :int = [frame_duration] * len(__a )
if extend_frames:
lowerCamelCase :Tuple = 1.5
lowerCamelCase :str = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(__a ) )
imageio.mimsave(__a , __a , duration=__a )
print(F"gif saved to {output_path}" )
def snake_case ( self : Optional[Any] , __snake_case : Tuple=None , __snake_case : Any=None ):
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
lowerCamelCase :Tuple = preprocess(Image.open(__a ) , target_image_size=256 ).to(self.device )
lowerCamelCase :Dict = preprocess_vqgan(__a )
lowerCamelCase :List[str] = self.vqgan.encode(__a )
return z
def snake_case ( self : Optional[Any] , __snake_case : Dict ):
lowerCamelCase :List[str] = self.latent.detach().requires_grad_()
lowerCamelCase :int = base_latent + transform_vector
if self.quantize:
lowerCamelCase :Union[str, Any] = self.vqgan.quantize(__a )
else:
lowerCamelCase :Optional[Any] = trans_latent
return self.vqgan.decode(__a )
def snake_case ( self : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : List[Any]=None ):
lowerCamelCase :Optional[int] = self.clip_preprocessor(text=__a , images=__a , return_tensors='''pt''' , padding=__a )
lowerCamelCase :Optional[int] = self.clip(**__a )
lowerCamelCase :Optional[int] = clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase :List[str] = similarity_logits * weights
return similarity_logits.sum()
def snake_case ( self : Any , __snake_case : Tuple , __snake_case : Tuple , __snake_case : int ):
lowerCamelCase :List[Any] = self._get_clip_similarity(pos_prompts['''prompts'''] , __a , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
lowerCamelCase :Optional[Any] = self._get_clip_similarity(neg_prompts['''prompts'''] , __a , weights=neg_prompts['''weights'''] )
else:
lowerCamelCase :int = torch.tensor([1] , device=self.device )
lowerCamelCase :Dict = -torch.log(__a ) + torch.log(__a )
return loss
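# The loss is -log(pos_similarity) + log(neg_similarity), so minimizing it pushes
# similarity to the positive prompts up and to the negative prompts down. E.g.
# (hand-computed) pos=8, neg=2 gives log(2/8) ≈ -1.39, while pos=2, neg=8 gives
# log(8/2) ≈ +1.39.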
def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Dict ):
lowerCamelCase :Tuple = torch.randn_like(self.latent , requires_grad=__a , device=self.device )
lowerCamelCase :int = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCamelCase :Optional[Any] = self._add_vector(__a )
lowerCamelCase :Optional[Any] = loop_post_process(__a )
lowerCamelCase :Dict = self._get_CLIP_loss(__a , __a , __a )
print('''CLIP loss''' , __a )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=__a )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def snake_case ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Tuple ):
wandb.init(reinit=__a , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
lowerCamelCase :Dict = Image.open(__a )
lowerCamelCase :Optional[Any] = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(__a ) )
def snake_case ( self : str , __snake_case : Union[str, Any] ):
if not prompts:
return []
lowerCamelCase :Dict = []
lowerCamelCase :str = []
if isinstance(__a , __a ):
lowerCamelCase :int = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(__a , (tuple, list) ):
lowerCamelCase :str = prompt[0]
lowerCamelCase :Optional[Any] = float(prompt[1] )
elif ":" in prompt:
lowerCamelCase :str = prompt.split(''':''' )
lowerCamelCase :int = float(__a )
else:
lowerCamelCase :Union[str, Any] = prompt
lowerCamelCase :Optional[int] = 1.0
processed_prompts.append(__a )
weights.append(__a )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__a , device=self.device ),
}
def snake_case ( self : List[Any] , __snake_case : int , __snake_case : Any=None , __snake_case : Tuple=None , __snake_case : Tuple=True , __snake_case : Any=False , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : int=None , ):
if image_path:
lowerCamelCase :str = self._get_latent(__a )
else:
lowerCamelCase :List[str] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__a , __a , __a )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase :int = self.process_prompts(__a )
lowerCamelCase :int = self.process_prompts(__a )
if save_final and save_path is None:
lowerCamelCase :str = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(__a ):
os.makedirs(__a )
else:
lowerCamelCase :Dict = save_path + """_""" + get_timestamp()
os.makedirs(__a )
lowerCamelCase :Tuple = save_path
lowerCamelCase :str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(__a ) )
lowerCamelCase :Optional[int] = loop_post_process(__a )
for iter, transformed_img in enumerate(self._optimize_CLIP(__a , __a , __a ) ):
if show_intermediate:
show_pil(__a )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(__a )} )
if show_final:
show_pil(__a )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 0
|
A__ : Union[str, Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A__ : str = [{"type": "code", "content": INSTALL_CONTENT}]
A__ : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 721
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
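# For the non-multiple-choice case this property yields, e.g.:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])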
| 49
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowerCAmelCase ( __a ):
_UpperCAmelCase = 'trocr'
_UpperCAmelCase = ['past_key_values']
_UpperCAmelCase = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : List[Any] , __snake_case : Optional[Any]=50265 , __snake_case : List[str]=1024 , __snake_case : int=12 , __snake_case : Tuple=16 , __snake_case : Union[str, Any]=4096 , __snake_case : List[Any]="gelu" , __snake_case : Optional[int]=512 , __snake_case : List[str]=0.1 , __snake_case : List[Any]=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : List[Any]=2 , __snake_case : int=0.0_2 , __snake_case : Union[str, Any]=0.0 , __snake_case : Tuple=True , __snake_case : int=False , __snake_case : int=True , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=1 , __snake_case : str=0 , __snake_case : Dict=2 , **__snake_case : Optional[Any] , ):
lowerCamelCase :Union[str, Any] = vocab_size
lowerCamelCase :int = d_model
lowerCamelCase :Tuple = decoder_layers
lowerCamelCase :Any = decoder_attention_heads
lowerCamelCase :Dict = decoder_ffn_dim
lowerCamelCase :List[Any] = activation_function
lowerCamelCase :List[Any] = max_position_embeddings
lowerCamelCase :Union[str, Any] = dropout
lowerCamelCase :List[str] = attention_dropout
lowerCamelCase :List[str] = activation_dropout
lowerCamelCase :int = init_std
lowerCamelCase :List[str] = decoder_layerdrop
lowerCamelCase :str = use_cache
lowerCamelCase :List[str] = scale_embedding
lowerCamelCase :Union[str, Any] = use_learned_position_embeddings
lowerCamelCase :List[str] = layernorm_embedding
super().__init__(
pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , decoder_start_token_id=A__ , **A__ , )
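# attribute_map aliases decoder-specific fields under the generic names used
# across the library; a rough usage sketch (assuming PretrainedConfig's
# attribute_map mechanics):
#   config = TrOCRConfig()
#   assert config.hidden_size == config.d_model
#   assert config.num_hidden_layers == config.decoder_layers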
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 0
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
        self.one_complete_example('''complete_nlp_example.py''' , True )
        self.one_complete_example('''complete_nlp_example.py''' , False )
def snake_case ( self : Optional[Any] ):
        cv_path = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        special_strings = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
        self.one_complete_example('''complete_cv_example.py''' , True , cv_path , special_strings )
        self.one_complete_example('''complete_cv_example.py''' , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = '''ylacombe/bark-small'''
lowerCamelCase :int = tempfile.mkdtemp()
lowerCamelCase :Dict = '''en_speaker_1'''
lowerCamelCase :int = '''This is a test string'''
lowerCamelCase :int = '''speaker_embeddings_path.json'''
lowerCamelCase :Dict = '''speaker_embeddings'''
def snake_case ( self : Tuple , **__snake_case : int ):
return AutoTokenizer.from_pretrained(self.checkpoint , **__UpperCamelCase )
def snake_case ( self : str ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Optional[int] = BarkProcessor(tokenizer=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase :Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase :Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase :List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase :Optional[int] = 35
lowerCamelCase :Optional[int] = 2
lowerCamelCase :str = 8
lowerCamelCase :Optional[int] = {
'''semantic_prompt''': np.ones(__UpperCamelCase ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase :Optional[int] = processor(text=self.input_string , voice_preset=__UpperCamelCase )
lowerCamelCase :int = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase :int = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase :Optional[Any] = processor(text=self.input_string , voice_preset=__UpperCamelCase )
lowerCamelCase :str = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase :str = processor(text=self.input_string , voice_preset=self.voice_preset )
def snake_case ( self : int ):
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :List[str] = BarkProcessor(tokenizer=__UpperCamelCase )
lowerCamelCase :List[Any] = processor(text=self.input_string )
lowerCamelCase :Tuple = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
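# These are plain pytest-style assertions; a typical invocation (repository
# root assumed) would be:
# python -m pytest digital_image_processing/test_digital_image_processing.py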
| 49
| 0
|
def solution(n: int = 1000000) -> int:
    # Memoized search for the start (below n) of the longest Collatz chain.
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
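# Hedged spot check: the chain from 13 has 10 terms
# (13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1), and for n = 1_000_000
# the expected Project Euler 14 answer is 837799.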
| 703
|
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
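# Why log10 instead of exponentiation: computing a**x directly would build
# enormous integers, but log10 is monotonic, so a**x > b**y exactly when
# x*log10(a) > y*log10(b); each line is therefore compared in constant time.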
| 49
| 0
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
A__ = logging.getLogger(__name__)
if __name__ == "__main__":
A__ = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
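# The dumped `counts` list is indexed by token id: counts[k] holds the number
# of occurrences of id k in the binarized corpus, and ids that never appear
# keep the initial value 0.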
| 704
|
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
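# Hedged doctest-style example (moves restricted to right/down):
# >>> min_path_sum([[1, 3], [2, 4]])
# 7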
| 49
| 0
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'{solution() = }')
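# How the reduce step works on a toy window: for the digit run "9989" with a
# window of length 2, the products are 9*9=81, 9*8=72 and 8*9=72, so the
# generator's max would be 81; above, the same idea runs over 13-digit windows.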
| 705
|
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
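# Reading the loop above: a spiral of odd side length j has 2*j - 1 diagonal
# values, and range(j*j + j + 1, (j + 2)*(j + 2), j + 1) visits the three
# non-square corners of the next ring ((j+2)**2 itself is a perfect square and
# never prime), so primes / (2*j - 1) tracks the prime ratio ring by ring.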
| 49
| 0
|
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
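# Hedged sanity check: for f(x) = x**2 on [0, 1] with 10 steps, main() prints
# y = 0.335 (the exact integral is 1/3). Note that make_points stops on a
# float comparison, so inclusion of the last interior point depends on rounding.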
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class _lowerCAmelCase ( PretrainedConfig ):
_UpperCAmelCase = "transfo-xl"
_UpperCAmelCase = ["mems"]
_UpperCAmelCase = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , __snake_case : Tuple=267735 , __snake_case : Union[str, Any]=[20000, 40000, 200000] , __snake_case : int=1024 , __snake_case : int=1024 , __snake_case : int=16 , __snake_case : Any=64 , __snake_case : int=4096 , __snake_case : Dict=4 , __snake_case : int=False , __snake_case : Optional[int]=18 , __snake_case : List[str]=1600 , __snake_case : Optional[Any]=1000 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=0 , __snake_case : Union[str, Any]=-1 , __snake_case : Tuple=True , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.0 , __snake_case : str=True , __snake_case : Union[str, Any]="normal" , __snake_case : Optional[Any]=0.0_1 , __snake_case : List[str]=0.0_1 , __snake_case : Union[str, Any]=0.0_2 , __snake_case : Optional[int]=1e-5 , __snake_case : int=0 , **__snake_case : Union[str, Any] , ):
lowerCamelCase :Optional[Any] = vocab_size
lowerCamelCase :Any = []
        self.cutoffs.extend(__snake_case )
if proj_share_all_but_first:
lowerCamelCase :str = [False] + [True] * len(self.cutoffs )
else:
lowerCamelCase :int = [False] + [False] * len(self.cutoffs )
lowerCamelCase :List[Any] = d_model
lowerCamelCase :int = d_embed
lowerCamelCase :Optional[int] = d_head
lowerCamelCase :List[str] = d_inner
lowerCamelCase :List[str] = div_val
lowerCamelCase :Tuple = pre_lnorm
lowerCamelCase :Tuple = n_layer
lowerCamelCase :Tuple = n_head
lowerCamelCase :List[Any] = mem_len
lowerCamelCase :Dict = same_length
lowerCamelCase :Optional[Any] = attn_type
lowerCamelCase :Optional[Any] = clamp_len
lowerCamelCase :Optional[int] = sample_softmax
lowerCamelCase :Union[str, Any] = adaptive
lowerCamelCase :int = dropout
lowerCamelCase :Union[str, Any] = dropatt
lowerCamelCase :int = untie_r
lowerCamelCase :int = init
lowerCamelCase :Optional[int] = init_range
lowerCamelCase :str = proj_init_std
lowerCamelCase :Tuple = init_std
lowerCamelCase :Optional[int] = layer_norm_epsilon
        super().__init__(eos_token_id=__snake_case , **__snake_case )
    @property
    def max_position_embeddings( self : Optional[Any] ):
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self : Any , __snake_case : Any ):
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 707
|
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
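# Hedged doctest-style examples: 4 = 2*2 has an even count of prime factors,
# 12 = 2*2*3 an odd count.
# >>> liouville_lambda(4)
# 1
# >>> liouville_lambda(12)
# -1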
| 49
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
A__ = logging.getLogger()
A__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowerCAmelCase ( TestCasePlus ):
def snake_case ( self : int , __snake_case : Dict ):
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
lowerCamelCase :int = {'''source''': '''What is love ?''', '''target''': '''life'''}
lowerCamelCase :Optional[Any] = {'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase :Union[str, Any] = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(_lowerCAmelCase , F"{split}.{field}" ) , '''w''' ) as f:
f.write(_lowerCAmelCase )
def snake_case ( self : Union[str, Any] , __snake_case : int , __snake_case : str = "pytorch" ):
lowerCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCamelCase :str = os.path.join(_lowerCAmelCase , '''output''' )
lowerCamelCase :Any = os.path.join(_lowerCAmelCase , '''data''' )
self._create_dummy_data(data_dir=_lowerCAmelCase )
lowerCamelCase :Any = F"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(F"--gpus={gpus}" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
lowerCamelCase :Dict = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_lowerCAmelCase , env=self.get_env() )
lowerCamelCase :Union[str, Any] = os.path.join(_lowerCAmelCase , '''metrics.json''' )
with open(_lowerCAmelCase ) as f:
lowerCamelCase :Optional[int] = json.load(_lowerCAmelCase )
return result
@require_torch_gpu
def snake_case ( self : List[str] ):
lowerCamelCase :int = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def snake_case ( self : Dict ):
lowerCamelCase :Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
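# Example invocation (script name and dump path are placeholders, not taken
# from this file):
# python convert_vit_hybrid_timm_to_pytorch.py \
#     --vit_name vit_base_r50_s16_384 \
#     --pytorch_dump_folder_path ./vit-hybrid-base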
| 49
| 0
|
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete Fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and recover A*B via the inverse transform
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
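# Hedged usage sketch of the FFT class above:
# >>> FFT([1, 2], [3, 4]).product   # (1 + 2x) * (3 + 4x)
# [(3+0j), (10+0j), (8+0j)]         # i.e. 3 + 10x + 8x^2, up to rounding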
| 709
|
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
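# Hedged doctest-style check (even Fibonacci terms <= 100 are 2, 8 and 34):
# >>> solution(100)
# 44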
| 49
| 0
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
def __init__( self : Union[str, Any] , __snake_case : Any ):
super().__init__()
lowerCamelCase :Dict = model
lowerCamelCase :Union[str, Any] = 2
lowerCamelCase :Any = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case ( self : Dict ):
pass
def _lowerCamelCase ( a_ : str , a_ : str , a_ : str):
lowerCamelCase :List[Any] = LongformerModel.from_pretrained(__UpperCamelCase)
lowerCamelCase :Optional[int] = LightningModel(__UpperCamelCase)
lowerCamelCase :List[Any] = torch.load(__UpperCamelCase , map_location=torch.device('''cpu'''))
lightning_model.load_state_dict(ckpt['''state_dict'''])
# init longformer question answering model
lowerCamelCase :Dict = LongformerForQuestionAnswering.from_pretrained(__UpperCamelCase)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__UpperCamelCase)
print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
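# Illustrative invocation of the conversion script (the filename and paths are
# assumptions, not verified against any released checkpoint):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer_qa_pytorch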
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
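# A minimal sketch of the lazy-import idea behind `_LazyModule`, based on
# module-level __getattr__ (PEP 562). This is an illustration only, not
# transformers' actual implementation; it would live in a package __init__.py.
import importlib

_LAZY_ATTRS = {"NllbMoeConfig": ".configuration_nllb_moe"}


def __getattr__(name):
    # Import the backing submodule only when the attribute is first accessed.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")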
| 49
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _lowerCamelCase ( a_ : int):
lowerCamelCase :List[str] = int(number**0.5)
return number == sq * sq
def _lowerCamelCase ( a_ : int , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int):
lowerCamelCase :Tuple = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCamelCase :Union[str, Any] = x_den * y_den * z_den
lowerCamelCase :Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_)
top //= hcf
bottom //= hcf
return top, bottom
def _lowerCamelCase ( a_ : int = 35):
lowerCamelCase :str = set()
lowerCamelCase :Optional[Any] = 42
lowerCamelCase :str = Fraction(0)
lowerCamelCase :Dict = 42
for x_num in range(1 , order + 1):
for x_den in range(x_num + 1 , order + 1):
for y_num in range(1 , order + 1):
for y_den in range(y_num + 1 , order + 1):
# n=1
lowerCamelCase :Any = x_num * y_den + x_den * y_num
lowerCamelCase :Tuple = x_den * y_den
lowerCamelCase :Tuple = gcd(lowerCamelCase_ , lowerCamelCase_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCamelCase :str = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
unique_s.add(lowerCamelCase_)
# n=2
lowerCamelCase :str = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCamelCase :List[str] = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase_) and is_sq(lowerCamelCase_):
lowerCamelCase :Optional[int] = int(sqrt(lowerCamelCase_))
lowerCamelCase :str = int(sqrt(lowerCamelCase_))
lowerCamelCase :Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCamelCase :Tuple = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
unique_s.add(lowerCamelCase_)
# n=-1
lowerCamelCase :Dict = x_num * y_num
lowerCamelCase :Optional[Any] = x_den * y_num + x_num * y_den
lowerCamelCase :Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCamelCase :int = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
unique_s.add(lowerCamelCase_)
# n=2
lowerCamelCase :Any = x_num * x_num * y_num * y_num
lowerCamelCase :Union[str, Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase_) and is_sq(lowerCamelCase_):
lowerCamelCase :List[Any] = int(sqrt(lowerCamelCase_))
lowerCamelCase :Tuple = int(sqrt(lowerCamelCase_))
lowerCamelCase :int = gcd(lowerCamelCase_ , lowerCamelCase_)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCamelCase :Any = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
unique_s.add(lowerCamelCase_)
for num, den in unique_s:
total += Fraction(lowerCamelCase_ , lowerCamelCase_)
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
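# Quick worked check of the fraction arithmetic that add_three performs,
# written with fractions.Fraction for readability (illustrative only):
from fractions import Fraction

# 1/2 + 1/3 + 1/6 reduces to 1/1, so the gcd-based reduction above would
# return the pair (1, 1) for these three terms.
assert Fraction(1, 2) + Fraction(1, 3) + Fraction(1, 6) == Fraction(1, 1)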
| 711
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
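# Independent sanity-check sketch of the two activation helpers above; the
# names `sigmoid` / `sigmoid_derivative` are illustrative. Note the derivative
# is expressed in terms of the sigmoid's *output*, not its pre-activation input.
import numpy


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(output: numpy.ndarray) -> numpy.ndarray:
    return output * (1 - output)


assert sigmoid(numpy.array(0.0)) == 0.5
assert sigmoid_derivative(sigmoid(numpy.array(0.0))) == 0.25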
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A__ = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
    for i in range(n):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
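# Standalone sketch of the same DP with a worked example; the name
# `matches_abbreviation` is illustrative and not defined above.
def matches_abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match the next target character...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ...or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


# "daBcd" -> "ABC": capitalize 'a' and 'c', delete both lowercase 'd's.
assert matches_abbreviation("daBcd", "ABC")
assert not matches_abbreviation("dBcd", "ABC")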
| 49
| 0
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
A__ = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class _lowerCAmelCase ( __lowercase ):
_UpperCAmelCase = 'ernie_m'
_UpperCAmelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : List[str] , __snake_case : List[Any] = 250002 , __snake_case : Optional[Any] = 768 , __snake_case : List[Any] = 12 , __snake_case : str = 12 , __snake_case : str = 3072 , __snake_case : Union[str, Any] = "gelu" , __snake_case : Any = 0.1 , __snake_case : Union[str, Any] = 0.1 , __snake_case : str = 514 , __snake_case : int = 0.0_2 , __snake_case : Any = 1 , __snake_case : Union[str, Any] = 1e-0_5 , __snake_case : Any=None , __snake_case : Tuple=False , __snake_case : Tuple=0.0 , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__A , **__A )
lowerCamelCase :Tuple = vocab_size
lowerCamelCase :int = hidden_size
lowerCamelCase :Union[str, Any] = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Dict = intermediate_size
lowerCamelCase :Tuple = hidden_act
lowerCamelCase :Any = hidden_dropout_prob
lowerCamelCase :int = attention_probs_dropout_prob
lowerCamelCase :Union[str, Any] = max_position_embeddings
lowerCamelCase :Tuple = initializer_range
lowerCamelCase :Optional[Any] = layer_norm_eps
lowerCamelCase :Union[str, Any] = classifier_dropout
lowerCamelCase :Union[str, Any] = is_decoder
lowerCamelCase :int = act_dropout
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 0
|
import random
def _lowerCAmelCase ( a_ : List[str] , a_ : str , a_ : str):
lowerCamelCase :Dict = a[left_index]
lowerCamelCase :Union[str, Any] = left_index + 1
for j in range(left_index + 1 , UpperCamelCase__):
if a[j] < pivot:
lowerCamelCase , lowerCamelCase :Optional[int] = a[i], a[j]
i += 1
lowerCamelCase , lowerCamelCase :int = a[i - 1], a[left_index]
return i - 1
def _lowerCAmelCase ( a_ : Optional[int] , a_ : List[Any] , a_ : str):
if left < right:
lowerCamelCase :Dict = random.randint(UpperCamelCase__ , right - 1)
lowerCamelCase , lowerCamelCase :Union[str, Any] = (
a[left],
a[pivot],
    ) # switches the pivot with the leftmost bound
lowerCamelCase :Tuple = partition(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
quick_sort_random(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) # recursive quicksort to the left of the pivot point
quick_sort_random(
UpperCamelCase__ , pivot_index + 1 , UpperCamelCase__) # recursive quicksort to the right of the pivot point
def _lowerCAmelCase ( ):
lowerCamelCase :Tuple = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCamelCase :Union[str, Any] = [int(item) for item in user_input.split(''',''')]
quick_sort_random(UpperCamelCase__ , 0 , len(UpperCamelCase__))
print(UpperCamelCase__)
if __name__ == "__main__":
main()
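# Non-interactive self-check sketch of randomized quicksort on a fixed list
# (out-of-place variant for brevity; `quicksort_copy` is an illustrative name):
import random


def quicksort_copy(items: list) -> list:
    if len(items) <= 1:
        return list(items)
    pivot = items[random.randrange(len(items))]
    less = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    greater = [x for x in items if x > pivot]
    return quicksort_copy(less) + equal + quicksort_copy(greater)


assert quicksort_copy([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]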
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( __UpperCAmelCase ):
_UpperCAmelCase = '''wavlm'''
def __init__( self : int , __snake_case : int=32 , __snake_case : Optional[int]=768 , __snake_case : Optional[int]=12 , __snake_case : List[Any]=12 , __snake_case : List[Any]=3072 , __snake_case : Tuple="gelu" , __snake_case : List[str]=0.1 , __snake_case : Any=0.1 , __snake_case : int=0.1 , __snake_case : Tuple=0.0 , __snake_case : Any=0.1 , __snake_case : str=0.1 , __snake_case : Any=0.0_2 , __snake_case : Optional[int]=1e-5 , __snake_case : int="group" , __snake_case : List[str]="gelu" , __snake_case : int=(512, 512, 512, 512, 512, 512, 512) , __snake_case : int=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Any=(10, 3, 3, 3, 3, 2, 2) , __snake_case : List[str]=False , __snake_case : Any=128 , __snake_case : Optional[int]=16 , __snake_case : Dict=320 , __snake_case : str=800 , __snake_case : int=False , __snake_case : Union[str, Any]=True , __snake_case : str=0.0_5 , __snake_case : Optional[int]=10 , __snake_case : Dict=2 , __snake_case : Dict=0.0 , __snake_case : Tuple=10 , __snake_case : str=320 , __snake_case : Dict=2 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=100 , __snake_case : Optional[Any]=256 , __snake_case : List[str]=256 , __snake_case : Tuple=0.1 , __snake_case : List[str]="mean" , __snake_case : Tuple=False , __snake_case : Optional[int]=False , __snake_case : Dict=256 , __snake_case : Any=(512, 512, 512, 512, 1500) , __snake_case : List[str]=(5, 3, 3, 1, 1) , __snake_case : List[str]=(1, 2, 3, 1, 1) , __snake_case : Optional[Any]=512 , __snake_case : Any=80 , __snake_case : Optional[int]=0 , __snake_case : int=1 , __snake_case : List[str]=2 , __snake_case : Union[str, Any]=False , __snake_case : Optional[int]=3 , __snake_case : Dict=2 , __snake_case : Tuple=3 , __snake_case : Any=None , **__snake_case : int , ):
super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
lowerCamelCase :Optional[int] = hidden_size
lowerCamelCase :List[Any] = feat_extract_norm
lowerCamelCase :Optional[Any] = feat_extract_activation
lowerCamelCase :List[Any] = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase :List[Any] = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase :List[Any] = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase :Optional[int] = conv_bias
lowerCamelCase :List[Any] = num_buckets
lowerCamelCase :Optional[Any] = max_bucket_distance
lowerCamelCase :int = num_conv_pos_embeddings
lowerCamelCase :List[Any] = num_conv_pos_embedding_groups
lowerCamelCase :Dict = len(self.conv_dim )
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :int = intermediate_size
lowerCamelCase :List[Any] = hidden_act
lowerCamelCase :int = num_attention_heads
lowerCamelCase :List[Any] = hidden_dropout
lowerCamelCase :Tuple = attention_dropout
lowerCamelCase :List[str] = activation_dropout
lowerCamelCase :List[Any] = feat_proj_dropout
lowerCamelCase :Dict = final_dropout
lowerCamelCase :Optional[int] = layerdrop
lowerCamelCase :Dict = layer_norm_eps
lowerCamelCase :str = initializer_range
lowerCamelCase :List[Any] = num_ctc_classes
lowerCamelCase :int = vocab_size
lowerCamelCase :Optional[Any] = do_stable_layer_norm
lowerCamelCase :List[str] = use_weighted_layer_sum
lowerCamelCase :str = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase :Dict = apply_spec_augment
lowerCamelCase :Any = mask_time_prob
lowerCamelCase :str = mask_time_length
lowerCamelCase :List[Any] = mask_time_min_masks
lowerCamelCase :Union[str, Any] = mask_feature_prob
lowerCamelCase :Union[str, Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCamelCase :List[Any] = num_codevectors_per_group
lowerCamelCase :Tuple = num_codevector_groups
lowerCamelCase :Optional[int] = contrastive_logits_temperature
lowerCamelCase :Any = num_negatives
lowerCamelCase :Union[str, Any] = codevector_dim
lowerCamelCase :List[str] = proj_codevector_dim
lowerCamelCase :Union[str, Any] = diversity_loss_weight
# ctc loss
lowerCamelCase :str = ctc_loss_reduction
lowerCamelCase :Dict = ctc_zero_infinity
# adapter
lowerCamelCase :Dict = add_adapter
lowerCamelCase :List[str] = adapter_kernel_size
lowerCamelCase :Tuple = adapter_stride
lowerCamelCase :str = num_adapter_layers
lowerCamelCase :List[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase :int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase :str = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase :Any = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase :Tuple = list(__SCREAMING_SNAKE_CASE )
lowerCamelCase :Dict = xvector_output_dim
@property
def snake_case ( self : Optional[int] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
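# Illustrative check of the property above: the overall downsampling factor of
# the convolutional feature extractor is the product of its strides. With the
# default strides this is 5 * 2**6 = 320 input samples per output frame.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320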
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
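# Hedged usage sketch of what these filesystem classes enable: opening a
# compressed file transparently through fsspec. Assumes fsspec is installed;
# the paths are temporary and illustrative.
import gzip
import os
import tempfile

import fsspec

with tempfile.TemporaryDirectory() as tmp_dir:
    gz_path = os.path.join(tmp_dir, "data.txt.gz")
    with gzip.open(gz_path, "wb") as f:
        f.write(b"hello")
    # fsspec decompresses on the fly when a compression codec is given.
    with fsspec.open(gz_path, "rb", compression="gzip") as f:
        assert f.read() == b"hello"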
| 49
| 0
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
A__ = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def _lowerCamelCase ( a_ : int , a_ : tuple , a_ : Path , a_ : List[Any] , a_ : List[str] , a_ : List[str] , a_ : Dict , a_ : Any=False , ):
output_path.parent.mkdir(parents=a_ , exist_ok=a_)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , use_external_data_format=a_ , enable_onnx_checker=a_ , opset_version=a_ , )
else:
export(
a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , opset_version=a_ , )
@torch.no_grad()
def _lowerCamelCase ( a_ : str , a_ : str , a_ : int , a_ : bool = False):
lowerCamelCase :Optional[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
else:
lowerCamelCase :int = 'cpu'
lowerCamelCase :Any = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=a_).to(a_)
lowerCamelCase :Optional[int] = Path(a_)
# TEXT ENCODER
lowerCamelCase :Any = pipeline.text_encoder.config.max_position_embeddings
lowerCamelCase :Tuple = pipeline.text_encoder.config.hidden_size
lowerCamelCase :str = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , )
del pipeline.text_encoder
# UNET
lowerCamelCase :List[str] = pipeline.unet.config.in_channels
lowerCamelCase :str = pipeline.unet.config.sample_size
lowerCamelCase :List[str] = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_),
torch.randn(2).to(device=a_ , dtype=a_),
torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , use_external_data_format=a_ , )
lowerCamelCase :Optional[int] = str(unet_path.absolute().as_posix())
lowerCamelCase :List[Any] = os.path.dirname(a_)
lowerCamelCase :List[str] = onnx.load(a_)
# clean up existing tensor files
shutil.rmtree(a_)
os.mkdir(a_)
# collate external tensor files into one
onnx.save_model(
a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , )
del pipeline.unet
# VAE ENCODER
lowerCamelCase :Any = pipeline.vae
lowerCamelCase :Optional[Any] = vae_encoder.config.in_channels
lowerCamelCase :List[Any] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
    lowerCamelCase :Dict = lambda sample , return_dict: vae_encoder.encode(sample , return_dict)[0].sample()
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
# VAE DECODER
lowerCamelCase :List[str] = pipeline.vae
lowerCamelCase :Any = vae_decoder.config.latent_channels
lowerCamelCase :Optional[Any] = vae_decoder.config.out_channels
# forward only through the decoder part
    lowerCamelCase :List[str] = vae_decoder.decode
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCamelCase :int = pipeline.safety_checker
lowerCamelCase :List[str] = safety_checker.config.vision_config.num_channels
lowerCamelCase :str = safety_checker.config.vision_config.image_size
lowerCamelCase :Tuple = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_),
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=a_ , )
del pipeline.safety_checker
lowerCamelCase :Any = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
lowerCamelCase :Optional[Any] = pipeline.feature_extractor
else:
lowerCamelCase :List[str] = None
lowerCamelCase :List[str] = None
lowerCamelCase :Dict = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a_)
print('''ONNX pipeline saved to''' , a_)
del pipeline
del onnx_pipeline
lowerCamelCase :Any = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''')
print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
A__ = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
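# Illustrative invocation (script filename and paths are assumptions; opset 14
# matches the default declared above):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path ./stable-diffusion-checkpoint \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14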
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
            lowerCamelCase :Union[str, Any] = [[0] * len(x ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49
| 0
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
A__ = """Provide a string that I will generate its BWT transform: """
A__ = input(entry_msg).strip()
A__ = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
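# Usage sketch (not in the original module; the sample string is illustrative):
# the transform and its inverse round-trip any non-empty input.
if __name__ == "__main__":
    demo = "^BANANA"
    demo_result = bwt_transform(demo)
    assert demo_result["bwt_string"] == "BNN^AAA"  # last column of the sorted rotations
    assert reverse_bwt(demo_result["bwt_string"], demo_result["idx_original_string"]) == demo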
| 718
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 49
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 719
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
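# Sanity check for the incremental sieve above (demo added for illustration):
# it yields the primes in increasing order.
if __name__ == "__main__":
    from itertools import islice

    assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]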
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
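# Example invocation (hedged sketch; the paths below are placeholders, not real
# checkpoints, and <this_script> stands for this file's name):
#
#   python <this_script>.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path /path/to/converted-xmod-base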
| 49
| 0
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
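# Worked check (the value is the published Project Euler 46 answer): small odd
# composites such as 9 = 7 + 2*1**2 all decompose as prime + twice a square,
# and 5777 is the first one that does not.
if __name__ == "__main__":
    assert solution() == 5777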
| 721
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
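# Usage sketch (illustrative, not part of the original module): inspect the
# dynamic ONNX axes produced by the `inputs` property defined above.
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig()
    onnx_config = RobertaPreLayerNormOnnxConfig(config)
    # Default task -> batch/sequence dynamic axes for both input tensors.
    print(onnx_config.inputs)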
| 49
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
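# Standalone sketch (added for illustration) of the hard-concrete
# stretch-and-clamp used by the "l0" branch above (Louizos et al.);
# the score values are illustrative.
if __name__ == "__main__":
    demo_scores = torch.tensor([-3.0, 0.0, 3.0])  # learned mask scores
    l, r = -0.1, 1.1                              # stretch interval
    s_bar = torch.sigmoid(demo_scores) * (r - l) + l
    print(s_bar.clamp(min=0.0, max=1.0))          # ~0 prunes, ~1 keeps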
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 49
| 0
|
import argparse
from collections import defaultdict
import yaml
A__ = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
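# Illustrative demo (toy entries, not from the real TOC): duplicates with the
# same title collapse to a single entry and the result is sorted by title.
if __name__ == "__main__":
    _toy = [
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]
    assert clean_model_doc_toc(_toy) == [
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]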
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 49
| 0
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
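# Quick check of the formula (the tolerance/example choice is mine, not from
# the source): a 90-degree arc of a radius-10 circle is a quarter circumference.
if __name__ == "__main__":
    from math import isclose

    assert isclose(arc_length(90, 10), 2 * pi * 10 / 4)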
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
    nn :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
    file_path :Tuple = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image :Tuple = imread(file_path , 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate :Dict = 0
    y_coordinate :Optional[Any] = 0
    center :str = image[x_coordinate][y_coordinate]
    neighbors_pixels :Any = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image :int = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0]):
        for j in range(0 , image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j)
assert lbp_image.any()
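# These checks follow pytest conventions; assuming the repository layout implied
# by the imports above, they can be collected and run with e.g. `python -m pytest`.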
| 49
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A__ = logging.get_logger(__name__)
@add_end_docstrings(__A )
class _lowerCAmelCase ( __A ):
def __init__( self : Dict , *__snake_case : Optional[Any] , **__snake_case : str ):
super().__init__(*__snake_case , **__snake_case )
self.check_model_type(__snake_case )
    def snake_case ( self : Any , top_k : List[str]=None , padding : Any=None , truncation : str=None , **kwargs : List[str] ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self : str , image : List[Any] , question : int = None , **kwargs : Optional[int] ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs :Tuple = {'image': image, 'question': question}
        else:
            # otherwise `image` is expected to already carry the image/question pair(s)
            inputs :Union[str, Any] = image
        results :str = super().__call__(inputs , **kwargs )
        return results
    def snake_case ( self : Tuple , inputs : Tuple , padding : List[str]=False , truncation : Optional[int]=False ):
        image :Any = load_image(inputs['''image'''] )
        model_inputs :Any = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features :Dict = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def snake_case ( self : Any , model_inputs : int ):
        model_outputs :List[Any] = self.model(**model_inputs )
        return model_outputs
    def snake_case ( self : Tuple , model_outputs : int , top_k : Tuple=5 ):
        if top_k > self.model.config.num_labels:
            top_k :str = self.model.config.num_labels
        if self.framework == "pt":
            probs :int = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        scores :List[str] = scores.tolist()
        ids :int = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 703
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
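# Why logarithms: comparing a**x with b**y directly would build enormous
# integers; since log10 is monotonic, comparing x * log10(a) against
# y * log10(b) ranks the base/exponent pairs identically at floating-point cost.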
| 49
| 0
|
'''simple docstring'''
def greatest_common_divisor ( a : int , b : int):
    return abs(b) if a == 0 else greatest_common_divisor(b % a , a)
def gcd_by_iterative ( x : int , y : int):
    while y: # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x)
def main ( ):
    try:
        nums :int = input('''Enter two integers separated by comma (,): ''').split(''',''')
        num_1 :List[str] = int(nums[0])
        num_2 :List[Any] = int(nums[1])
        print(
            F"greatest_common_divisor({num_1}, {num_2}) = "
            F"{greatest_common_divisor(num_1 , num_2)}")
        print(F"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2)}")
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''')
if __name__ == "__main__":
main()
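# Worked trace (illustrative): greatest_common_divisor(24, 40) recurses as
# gcd(40 % 24, 24) -> gcd(16, 24) -> gcd(8, 16) -> gcd(0, 8) -> 8, which
# matches gcd_by_iterative(24, 40).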
| 704
|
def _lowerCamelCase ( grid : list):
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')
    for cell_n in range(1 , len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above :Any = grid[0]
    for row_n in range(1 , len(grid)):
        current_row :List[str] = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above)
        row_above :List[Any] = grid[row_n]
    return grid[-1][-1]
def fill_row ( current_row : list , row_above : list):
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
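# Illustrative call: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest path from
# the top-left to the bottom-right corner is 1 -> 3 -> 1 -> 1 -> 1, so the
# function returns 7.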
| 49
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A__ = 16
A__ = 32
def get_dataloaders ( accelerator : Tuple , batch_size : Any = 16 , model_name : List[Any] = "bert-base-cased"):
    tokenizer :Any = AutoTokenizer.from_pretrained(model_name)
    datasets :List[str] = load_dataset('''glue''' , '''mrpc''')
    def tokenize_function(examples : Union[str, Any]):
        # max_length=None => use the model max length (it's actually the default)
        outputs :Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets :Any = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets :str = tokenized_datasets.rename_column('''label''' , '''labels''')
    def collate_fn(examples : Any):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''')
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''')
    # Instantiate dataloaders.
    train_dataloader :Any = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader :List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function ( config : int , args : Tuple):
    # Initialize accelerator
    accelerator :Union[str, Any] = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr :int = config['''lr''']
    num_epochs :Optional[int] = int(config['''num_epochs'''])
    seed :Optional[int] = int(config['''seed'''])
    batch_size :Tuple = int(config['''batch_size'''])
    model_name :Any = args.model_name_or_path
    set_seed(seed)
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model :List[str] = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True)
# Instantiate optimizer
    optimizer_cls :Tuple = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer :Optional[int] = optimizer_cls(params=model.parameters() , lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps :List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps :List[str] = 1
    max_training_steps :Tuple = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler :str = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler :Tuple = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# We need to keep track of how many total steps we have iterated over
    overall_step :str = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch :int = 0
    # Now we train the model
    metric :Dict = evaluate.load('''glue''' , '''mrpc''')
    best_performance :Optional[Any] = 0
    performance_metric :Optional[Any] = {}
    for epoch in range(starting_epoch , num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs :int = model(**batch)
            loss :Optional[int] = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
        samples_seen :Optional[int] = 0
        for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
                outputs :str = model(**batch)
            predictions :List[str] = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch['''labels'''])) # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric :int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , lowerCAmelCase__)
lowerCamelCase :Optional[int] = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
lowerCamelCase :int = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''') , '''w''') as f:
            json.dump(performance_metric , f)
def main ( ):
    parser :str = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--performance_lower_bound''' , type=float , default=None , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=3 , help='''Number of train epochs.''' , )
    args :str = parser.parse_args()
    config :List[Any] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__)
if __name__ == "__main__":
main()
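# Typical launch command (illustrative; the config file and script names are
# assumptions):
#     accelerate launch --config_file ds_zero2_config.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results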
| 705
|
import math
def is_prime ( number : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution ( ratio : float = 0.1):
    primes :Dict = 3
    j :List[Any] = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(i)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
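# Context: this is the Project Euler 58 "spiral primes" setup. A number spiral of
# odd side length j has 2 * j - 1 values on its diagonals, and the inner range()
# enumerates the three new non-square corner values of each added ring; the loop
# widens the spiral until the fraction of primes on the diagonals drops below `ratio`.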
| 49
| 0
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester :
def __init__( self : Dict , __snake_case : Tuple , __snake_case : Optional[int]=16 , __snake_case : Optional[int]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=14 , __snake_case : int=10 , __snake_case : Optional[int]=19 , __snake_case : int=5 , __snake_case : List[str]=4 , __snake_case : int=True , __snake_case : List[str]=16 , __snake_case : List[Any]=2 , __snake_case : Dict=4 , __snake_case : List[str]=4 , __snake_case : Dict="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=[1, 2, 3, 4, 5] , __snake_case : Dict=25 , __snake_case : Optional[int]=5 , ):
lowerCamelCase :int = d_model
lowerCamelCase :int = parent
lowerCamelCase :Optional[Any] = batch_size
lowerCamelCase :str = prediction_length
lowerCamelCase :Optional[int] = context_length
lowerCamelCase :Optional[Any] = cardinality
lowerCamelCase :str = num_time_features
lowerCamelCase :Tuple = lags_sequence
lowerCamelCase :Optional[Any] = embedding_dimension
lowerCamelCase :Optional[Any] = is_training
lowerCamelCase :Any = hidden_size
lowerCamelCase :Dict = num_hidden_layers
lowerCamelCase :List[Any] = num_attention_heads
lowerCamelCase :Optional[Any] = intermediate_size
lowerCamelCase :Optional[int] = hidden_act
lowerCamelCase :int = hidden_dropout_prob
lowerCamelCase :List[str] = attention_probs_dropout_prob
lowerCamelCase :Tuple = context_length
lowerCamelCase :List[str] = prediction_length + label_length
lowerCamelCase :int = label_length
lowerCamelCase :List[str] = moving_average
lowerCamelCase :List[str] = autocorrelation_factor
    def get_config ( self : List[str] ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict ( self : Tuple , config : Union[str, Any] ):
        _past_length :Dict = config.context_length + max(config.lags_sequence )
        static_categorical_features :Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features :Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values :Union[str, Any] = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask :int = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features :Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values :List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict :List[str] = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
    def prepare_config_and_inputs ( self : Optional[Any] ):
        config :List[str] = self.get_config()
        inputs_dict :Tuple = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common ( self : Union[str, Any] ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone ( self : Optional[int] , config : Union[str, Any] , inputs_dict : Any ):
        model :List[str] = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs :Union[str, Any] = model(**inputs_dict )
        encoder_last_hidden_state :List[str] = outputs.encoder_last_hidden_state
        last_hidden_state :Optional[int] = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder :List[str] = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder :Optional[Any] = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict )
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input :List[Any] = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a :List[Any] = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean :Union[str, Any] = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros :Tuple = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input :Optional[Any] = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init :str = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder :Optional[int] = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder :List[str] = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a :Dict = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowerCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_UpperCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCAmelCase = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Dict ):
        self.model_tester :Tuple = AutoformerModelTester(self )
        self.config_tester :Union[str, Any] = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model :Dict = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info['''missing_keys'''] , [] )
def snake_case ( self : Union[str, Any] ):
        config_and_inputs :Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def snake_case ( self : List[Any] ):
pass
def snake_case ( self : Union[str, Any] ):
        model_signature :Union[str, Any] = inspect.signature(getattr(AutoformerModel , '''forward''' ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name :Optional[int] = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def snake_case ( self : Tuple ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model :Tuple = model_class(config )
            signature :List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names :Union[str, Any] = [*signature.parameters.keys()]
            expected_arg_names :List[Any] = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
def snake_case ( self : Tuple ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len :Any = getattr(self.model_tester , '''seq_length''' , None )
        decoder_seq_length :Union[str, Any] = getattr(self.model_tester , '''decoder_seq_length''' , seq_len )
        encoder_seq_length :Tuple = getattr(self.model_tester , '''encoder_seq_length''' , seq_len )
        d_model :Tuple = getattr(self.model_tester , '''d_model''' , None )
        num_attention_heads :Optional[int] = getattr(self.model_tester , '''num_attention_heads''' , None )
        dim :Dict = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Dict = False
lowerCamelCase :Dict = True
lowerCamelCase :List[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(a_ , a_ ) )
lowerCamelCase :Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCamelCase :Union[str, Any] = model(**self._prepare_for_class(a_ , a_ ) )
lowerCamelCase :Optional[Any] = outputs.encoder_attentions
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len :List[str] = len(outputs )
            correct_outlen :List[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
# decoder attentions
lowerCamelCase :int = outputs.decoder_attentions
self.assertIsInstance(a_ , (list, tuple) )
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase :Optional[Any] = outputs.cross_attentions
self.assertIsInstance(a_ , (list, tuple) )
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :Tuple = True
lowerCamelCase :str = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(a_ , a_ ) )
self.assertEqual(out_len + 2 , len(a_ ) )
lowerCamelCase :Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def snake_case ( self : List[Any] ):
super().test_retain_grad_hidden_states_attentions()
def _lowerCamelCase ( a_ : Optional[Any]="train-batch.pt"):
lowerCamelCase :Any = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowerCAmelCase_ , repo_type='''dataset''')
lowerCamelCase :Union[str, Any] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_)
return batch
@require_torch
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    def snake_case ( self : Any ):
        model :Union[str, Any] = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(torch_device )
        batch :Union[str, Any] = prepare_batch()
        with torch.no_grad():
            output :str = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        expected_shape :Dict = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice :str = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def snake_case ( self : Union[str, Any] ):
        model :List[Any] = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(torch_device )
        batch :Union[str, Any] = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            output :int = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        expected_shape :str = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice :str = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def snake_case ( self : str ):
        model :Optional[int] = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(torch_device )
        batch :Optional[int] = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            outputs :Dict = model.generate(
                static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        expected_shape :Union[str, Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice :int = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction :List[str] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
        tokenizer :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids :Tuple = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text :str = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer :str = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text :Optional[int] = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
def snake_case ( self : Dict ):
        tokenizer :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids :Tuple = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text :List[Any] = tokenizer.decode(greedy_ids[0] )
        streamer :List[str] = TextIteratorStreamer(tokenizer )
        generation_kwargs :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread :Tuple = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text :Any = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
def snake_case ( self : str ):
        tokenizer :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids :Optional[Any] = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids :List[str] = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer :List[str] = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text :int = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
        tokenizer :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
        model :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids :Union[str, Any] = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer :Dict = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text :Tuple = cs.out[:-1] # Remove the final "\n"
        streamer_text_tokenized :int = tokenizer(streamer_text , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
        tokenizer :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer :List[Any] = TextIteratorStreamer(tokenizer , timeout=0.0_0_1 )
        generation_kwargs :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread :Tuple = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text :Dict = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 49
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : List[str] , __snake_case : str = "" , __snake_case : int = None , __snake_case : Union[str, Any] = None , **__snake_case : str ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :int = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('''::''' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol ( cls : Any , path : str ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip('''/''' )
    def _get_dirs ( self : Union[str, Any] ):
        if self.dir_cache is None:
            f :Union[str, Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}
    def cat ( self : Union[str, Any] , path : Any ):
        return self.file.open().read()
    def _open( self : List[Any] , path : int , mode : List[str] = "rb" , block_size : List[str]=None , autocommit : Union[str, Any]=True , cache_options : Union[str, Any]=None , **kwargs : Optional[Any] , ):
        path :List[str] = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'" )
        return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : Tuple , __snake_case : List[str] , __snake_case : List[str] = "rb" , __snake_case : Any = None , __snake_case : str = None , __snake_case : Optional[Any] = DEFAULT_BLOCK_SIZE , **__snake_case : Optional[int] , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter :Optional[int] = self.file.__enter__
        class WrappedFile :
            def __init__( self : Tuple , file_ : List[Any] ):
                self._file = file_
def __enter__( self : List[Any] ):
self._file.__enter__()
return self
def __exit__( self : int , *__snake_case : int , **__snake_case : Optional[int] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Union[str, Any] ):
return iter(self._file )
            def __next__( self : str ):
return next(self._file )
def __getattr__( self : Dict , __snake_case : Union[str, Any] ):
return getattr(self._file , __snake_case )
        def fixed_enter(*args : Any , **kwargs : Dict ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
| 707
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( number : int):
    if not isinstance(number , int):
        msg :Tuple = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
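# Note: prime_factors counts multiplicity, so this computes (-1)**Omega(n), the
# Liouville lambda function; e.g. 12 = 2 * 2 * 3 has three factors, giving -1.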
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
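# The _LazyModule indirection defers the heavy torch/tokenizers imports until an
# attribute such as ReformerModel is first accessed, keeping plain
# `import transformers` cheap.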
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def create_rename_keys ( config : str , base_model : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def read_in_q_k_v ( state_dict : Any , config : Any , base_model : int=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix :Union[str, Any] = ''''''
        else:
            prefix :Optional[int] = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
        in_proj_bias :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict : int):
    ignore_keys :Any = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key ( dct : int , old : Any , new : Tuple):
    val :Optional[Any] = dct.pop(old)
    dct[new] = val
def prepare_img ( ):
    url :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im :Tuple = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name : Optional[Any] , pytorch_dump_folder_path : Optional[Any] , push_to_hub : Optional[Any]=False):
    backbone_config :Optional[int] = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=True , )
    config :Optional[int] = ViTHybridConfig(backbone_config=backbone_config , image_size=3_84 , num_labels=10_00)
    base_model :List[Any] = False
    # load original model from timm
    timm_model :List[str] = timm.create_model(vit_name , pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict :List[str] = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys :Tuple = create_rename_keys(config , base_model)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config , base_model)
    repo_id :List[str] = '''huggingface/label-files'''
    filename :Any = '''imagenet-1k-id2label.json'''
    id2label :List[Any] = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label :Optional[Any] = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
    transform :Dict = create_transform(**resolve_data_config({} , model=timm_model))
    timm_transforms :str = transform.transforms
    pillow_resamplings :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
    processor :Any = ViTHybridImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image :Dict = prepare_img()
    timm_pixel_values :str = transform(image).unsqueeze(0)
    pixel_values :str = processor(image , return_tensors='''pt''').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values)
# verify logits
    with torch.no_grad():
        outputs :Optional[int] = model(pixel_values)
        logits :Union[str, Any] = outputs.logits
    print('''Predicted class:''' , logits.argmax(-1).item())
    if base_model:
        timm_pooled_output :Union[str, Any] = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3)
    else:
        timm_logits :List[str] = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3)
print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
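# Example invocation (illustrative; the script filename is an assumption):
#     python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub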
| 49
| 0
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    def __init__( self : Optional[int] ):
        self.connections = {}
    def add_node ( self : Dict , node : str ):
        self.connections[node] = {}
    def add_transition_probability ( self : Any , node1 : str , node2 : str , probability : float ):
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes ( self : Optional[Any] ):
        return list(self.connections )
    def transition ( self : Any , node : str ):
        current_probability :Optional[Any] = 0
        random_value :List[Any] = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def _lowerCamelCase ( start : str , transitions : list , steps : int):
    graph :List[Any] = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability)
    visited :Tuple = Counter(graph.get_nodes())
    node :Any = start
    for _ in range(steps):
        node :Any = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
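# Minimal illustrative run (values are made up): with
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                    ("b", "a", 0.5), ("b", "b", 0.5)]
# _lowerCamelCase("a", transitions, 5000) returns a Counter of visit counts that
# is heavily weighted toward state "a".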
| 709
|
def solution ( n : int = 4_00_00_00):
    fib :Dict = [0, 1]
    i :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
    total :Dict = 0
    for j in range(len(fib) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
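# Alternative sketch: every third Fibonacci number is even (2, 8, 34, 144, ...),
# and those terms satisfy E(k) = 4 * E(k - 1) + E(k - 2), so one could sum only
# the even terms directly instead of filtering the full sequence.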
| 49
| 0
|
import os
from datetime import datetime as dt
from github import Github
A__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main ( ):
    g :Tuple = Github(os.environ['''GITHUB_TOKEN'''])
    repo :Tuple = g.get_repo('''huggingface/diffusers''')
    open_issues :Any = repo.get_issues(state='''open''')
    for issue in open_issues:
        comments :int = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True)
        last_comment :Any = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''')
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''')
issue.remove_from_labels('''stale''')
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''')
issue.add_to_labels('''stale''')
if __name__ == "__main__":
main()
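    # Note (mine): running this requires a GITHUB_TOKEN environment variable
    # with permissions on huggingface/diffusers, e.g.
    #     GITHUB_TOKEN=<token> python stale.py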
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
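# Usage sketch (my note, not part of the module): with the lazy pattern above,
#     from transformers.models.nllb_moe import NllbMoeConfig
# resolves immediately, while torch-backed names such as NllbMoeModel are only
# materialized on first attribute access.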
| 49
| 0
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config once for the whole suite; it is picked up
        # by every `accelerate launch` invocation below.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Dict ):
lowerCamelCase :int = self.get_auto_remove_tmp_dir()
lowerCamelCase :Optional[int] = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowerCamelCase :List[Any] = get_results(__a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Tuple ):
lowerCamelCase :str = self.get_auto_remove_tmp_dir()
lowerCamelCase :Tuple = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCamelCase :Union[str, Any] = get_results(__a )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCamelCase :List[str] = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :Optional[int] = get_results(__a )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
lowerCamelCase :List[str] = 7 if get_gpu_count() > 1 else 2
lowerCamelCase :str = self.get_auto_remove_tmp_dir()
lowerCamelCase :Optional[Any] = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :Dict = get_results(__a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = self.get_auto_remove_tmp_dir()
lowerCamelCase :Tuple = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :Union[str, Any] = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
lowerCamelCase :List[str] = self.get_auto_remove_tmp_dir()
lowerCamelCase :Dict = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :Any = get_results(__a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : int ):
lowerCamelCase :int = self.get_auto_remove_tmp_dir()
lowerCamelCase :Dict = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :Optional[int] = get_results(__a )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Dict ):
lowerCamelCase :List[str] = self.get_auto_remove_tmp_dir()
lowerCamelCase :Union[str, Any] = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :int = get_results(__a )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''translation_no_trainer''' ) ) )
@slow
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :int = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
lowerCamelCase :int = self.get_auto_remove_tmp_dir()
lowerCamelCase :List[str] = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
lowerCamelCase :Union[str, Any] = get_results(__a )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCamelCase :str = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowerCamelCase :Optional[int] = get_results(__a )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , '''image_classification_no_trainer''' ) ) )
| 711
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray):
        self.input_array = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer (4 nodes) feeding
        # the second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer (3 nodes) feeding
        # the output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradients of the squared error w.r.t. each weight matrix, obtained
        # with the chain rule; errors propagate backwards from the output layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Run a single forward pass on new data and threshold the activation.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
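    # Note (mine): 10 iterations is far too few for the XOR-like mapping above
    # to converge; a longer run such as
    #     neural_network.train(output=output, iterations=10_000, give_loss=True)
    # gives a far more reliable prediction.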
| 49
| 0
|
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
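    # Cross-check (my own): f(y) = y**6, so f''(y) = 30 * y**4 and f''(9) = 196830.
    assert differentiate(f, 9, 2) == 30 * 9**4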
| 712
|
def abbr(a: str, b: str) -> bool:
    # dp[i][j] is True when the first i characters of `a` can be turned into
    # the first j characters of `b` by upper-casing some lowercase letters
    # and deleting the remaining lowercase ones.
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
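    # Worked example (mine): "daBcd" can be abbreviated to "ABC" by capitalizing
    # 'a' and 'c' and deleting both lowercase 'd' characters.
    assert abbr("daBcd", "ABC") is True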
| 49
| 0
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano condition: the endpoints must bracket a root.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
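    # Sanity check (mine): the positive root of 10 - x**2 is sqrt(10) ~ 3.1623,
    # and the loop only exits once the bracket is narrower than 0.01.
    assert abs(bisection(0, 6) - 10**0.5) < 0.01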
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 49
| 0
|
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    # Count "cuban" primes below max_prime, i.e. primes of the form
    # (n + 1)**3 - n**3 = 3n**2 + 3n + 1; consecutive candidates differ
    # by 6 * cube_index.
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
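    # Quick check (mine): the cuban primes below 100 are 7, 19, 37 and 61.
    assert solution(100) == 4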
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
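# Manual equivalent of the multi-GPU path above (my note): the same script can
# be launched by hand with
#     torchrun --nproc_per_node=<num_gpus> <path>/scripts/external_deps/test_metrics.py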
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with a single file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
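# Usage sketch (my illustration; assumes the classes above have been registered
# with fsspec, e.g. via fsspec.register_implementation):
#     import fsspec
#     with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rb") as f:
#         data = f.read()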
| 49
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase :Dict = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
lowerCamelCase :Optional[int] = [t[-1] for t in os.walk(os.path.join(lowerCAmelCase_ , os.listdir(lowerCAmelCase_ )[0] , '''snapshots''' ) )]
lowerCamelCase :int = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Union[str, Any] ):
lowerCamelCase , lowerCamelCase :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCAmelCase_ )
lowerCamelCase :int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase :Any = jax.random.PRNGKey(0 )
lowerCamelCase :List[Any] = 4
lowerCamelCase :List[Any] = jax.device_count()
lowerCamelCase :Tuple = num_samples * [prompt]
lowerCamelCase :Dict = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
lowerCamelCase :Tuple = replicate(lowerCAmelCase_ )
lowerCamelCase :Union[str, Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase :Dict = shard(lowerCAmelCase_ )
lowerCamelCase :List[Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
lowerCamelCase :Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase_ ) == num_samples
def snake_case ( self : Optional[int] ):
lowerCamelCase , lowerCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowerCAmelCase_ )
lowerCamelCase :Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase :Any = jax.random.PRNGKey(0 )
lowerCamelCase :str = 50
lowerCamelCase :Tuple = jax.device_count()
lowerCamelCase :List[str] = num_samples * [prompt]
lowerCamelCase :Dict = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
lowerCamelCase :Optional[int] = replicate(lowerCAmelCase_ )
lowerCamelCase :int = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase :Union[str, Any] = shard(lowerCAmelCase_ )
lowerCamelCase :int = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def snake_case ( self : Any ):
lowerCamelCase , lowerCamelCase :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ )
lowerCamelCase :Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase :List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase :List[Any] = 50
lowerCamelCase :Optional[Any] = jax.device_count()
lowerCamelCase :List[str] = num_samples * [prompt]
lowerCamelCase :str = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
lowerCamelCase :Optional[Any] = replicate(lowerCAmelCase_ )
lowerCamelCase :List[Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase :List[Any] = shard(lowerCAmelCase_ )
lowerCamelCase :Optional[int] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def snake_case ( self : List[str] ):
lowerCamelCase , lowerCamelCase :Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
lowerCamelCase :List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase :List[str] = jax.random.PRNGKey(0 )
lowerCamelCase :List[Any] = 50
lowerCamelCase :Any = jax.device_count()
lowerCamelCase :Any = num_samples * [prompt]
lowerCamelCase :Any = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
lowerCamelCase :Optional[int] = replicate(lowerCAmelCase_ )
lowerCamelCase :Union[str, Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase :List[Any] = shard(lowerCAmelCase_ )
lowerCamelCase :Dict = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def snake_case ( self : List[Any] ):
lowerCamelCase :int = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
lowerCamelCase , lowerCamelCase :List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
lowerCamelCase :Union[str, Any] = scheduler.create_state()
lowerCamelCase :Optional[int] = scheduler_state
lowerCamelCase :List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase :Tuple = jax.random.PRNGKey(0 )
lowerCamelCase :Any = 50
lowerCamelCase :Any = jax.device_count()
lowerCamelCase :Optional[int] = num_samples * [prompt]
lowerCamelCase :Dict = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
lowerCamelCase :Optional[Any] = replicate(lowerCAmelCase_ )
lowerCamelCase :str = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase :Dict = shard(lowerCAmelCase_ )
lowerCamelCase :Any = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def snake_case ( self : int ):
lowerCamelCase :List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase :str = jax.device_count()
lowerCamelCase :List[str] = num_samples * [prompt]
lowerCamelCase :Optional[int] = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase_ )
lowerCamelCase , lowerCamelCase :Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , )
lowerCamelCase :Tuple = replicate(lowerCAmelCase_ )
lowerCamelCase :Optional[int] = pipeline.prepare_inputs(lowerCAmelCase_ )
lowerCamelCase :Optional[Any] = shard(lowerCAmelCase_ )
lowerCamelCase :Union[str, Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
lowerCamelCase :Optional[Any] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
lowerCamelCase , lowerCamelCase :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , use_memory_efficient_attention=lowerCAmelCase_ , )
lowerCamelCase :Optional[int] = replicate(lowerCAmelCase_ )
lowerCamelCase :Optional[int] = pipeline.prepare_inputs(lowerCAmelCase_ )
lowerCamelCase :Union[str, Any] = shard(lowerCAmelCase_ )
lowerCamelCase :Dict = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
lowerCamelCase :Any = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
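# A short usage sketch of the global-attention pattern exercised above: pad a
# batch and build a matching zero-initialized global_attention_mask, then mark
# the first token of each sequence as global (a common choice for LED, not a
# requirement). Assumes the '''allenai/led-base-16384''' checkpoint is reachable.
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
batch = tokenizer(['''A long paragraph.''', '''Another one.'''], padding=True)
global_attention_mask = [[0] * len(ids) for ids in batch['''input_ids''']]
for row in global_attention_mask:
    row[0] = 1  # give the leading <s> token global attention
batch['''global_attention_mask'''] = global_attention_mask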
| 49
| 0
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def _lowerCamelCase ( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str):
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCamelCase :Dict = XLMProphetNetForConditionalGenerationOld.from_pretrained(a_)
lowerCamelCase :List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
a_ , output_loading_info=a_)
else:
lowerCamelCase :List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(a_)
lowerCamelCase :List[str] = ProphetNetForConditionalGeneration.from_pretrained(
a_ , output_loading_info=a_)
lowerCamelCase :Tuple = ['''key_proj''', '''value_proj''', '''query_proj''']
lowerCamelCase :List[Any] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
lowerCamelCase :Optional[int] = key.split('''.''')
if attributes[0] == "lm_head":
lowerCamelCase :Optional[Any] = prophet
lowerCamelCase :int = prophet_old
else:
lowerCamelCase :List[str] = prophet.prophetnet
lowerCamelCase :str = prophet_old.model
lowerCamelCase :Any = False
for attribute in attributes:
if attribute in mapping:
lowerCamelCase :Optional[Any] = mapping[attribute]
if not hasattr(a_ , a_) and len(a_) > 0:
lowerCamelCase :Union[str, Any] = attribute
elif hasattr(a_ , a_):
lowerCamelCase :Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCamelCase :str = old_model.weight
logger.info(F"{attribute} is initialized.")
lowerCamelCase :List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCamelCase :Union[str, Any] = old_model.bias
logger.info(F"{attribute} is initialized")
lowerCamelCase :Dict = True
break
elif attribute in special_keys and hasattr(a_ , '''in_proj_weight'''):
lowerCamelCase :List[Any] = old_model.in_proj_weight.shape[0] // 3
lowerCamelCase :Optional[int] = getattr(a_ , a_)
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCamelCase :str = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
lowerCamelCase :Any = nn.Parameter(old_model.in_proj_bias[:embed_dim])
elif attribute == "key_proj":
lowerCamelCase :Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
lowerCamelCase :List[str] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
elif attribute == "value_proj":
lowerCamelCase :Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
lowerCamelCase :str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
lowerCamelCase :List[Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
lowerCamelCase :Dict = nn.Parameter(old_model.embed_positions.weight[:5_12, :])
lowerCamelCase :Union[str, Any] = True
break
if attribute.isdigit():
lowerCamelCase :Dict = model[int(a_)]
lowerCamelCase :Optional[int] = old_model[int(a_)]
else:
lowerCamelCase :Any = getattr(a_ , a_)
if old_attribute == "":
lowerCamelCase :Tuple = old_model
else:
if not hasattr(a_ , a_):
raise ValueError(F"{old_model} does not have {old_attribute}")
lowerCamelCase :str = getattr(a_ , a_)
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!")
print(F"Saving model to {pytorch_dump_folder_path}")
prophet.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
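# A standalone sketch of the fused-projection split performed above: fairseq
# stores q, k and v stacked in a single in_proj tensor, which is sliced into
# thirds. Tensor sizes here are dummies chosen only for illustration.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # q, k, v stacked row-wise
in_proj_bias = torch.randn(3 * embed_dim)
q_weight = in_proj_weight[:embed_dim, :]
k_weight = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_weight = in_proj_weight[2 * embed_dim :, :]
q_bias = in_proj_bias[:embed_dim]
k_bias = in_proj_bias[embed_dim : 2 * embed_dim]
v_bias = in_proj_bias[2 * embed_dim :]
assert q_weight.shape == k_weight.shape == v_weight.shape == (embed_dim, embed_dim)
assert q_bias.shape == k_bias.shape == v_bias.shape == (embed_dim,)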
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
from .processing_layoutlmv2 import LayoutLMv2Processor
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Layer,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
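# A stripped-down sketch of the lazy-import machinery this file relies on,
# using only the standard library: submodules are imported on first attribute
# access. The names below are placeholders, not the transformers implementation.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)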
| 49
| 0
|
def _lowerCamelCase ( n : int = 1_00):
lowerCamelCase :List[str] = set()
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :Any = n + 1 # maximum limit (inclusive bounds require ranging up to n + 1)
for a in range(2 , N):
for b in range(2 , N):
lowerCamelCase :Any = a**b # calculates the current power
collect_powers.add(current_pow) # adds the result to the set
return len(collect_powers)
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 718
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
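# A minimal usage sketch of the pipeline under test, outside of unittest. The
# tiny checkpoint and fixture path mirror the ones above and are assumed to be
# available in the local environment.
from PIL import Image
from transformers import pipeline

image_classifier = pipeline(model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''')
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
outputs = image_classifier(image, candidate_labels=['''cat''', '''plane''', '''remote'''])
print(outputs)  # a list of {'score': float, 'label': str} dicts, sorted by score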
| 49
| 0
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
A__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
A__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A__ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
A__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def _lowerCamelCase ( config_class : Optional[Any]):
lowerCamelCase :Union[str, Any] = None
# source code of `config_class`
lowerCamelCase :str = inspect.getsource(config_class)
lowerCamelCase :List[Any] = _re_checkpoint.findall(config_source)
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/'''):
lowerCamelCase :List[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase :Optional[Any] = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
lowerCamelCase :Dict = ckpt_name
break
return checkpoint
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCamelCase :Optional[int] = get_checkpoint_from_config_class(config_class)
lowerCamelCase :str = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name)
if len(configs_without_checkpoint) > 0:
lowerCamelCase :Dict = '''\n'''.join(sorted(configs_without_checkpoint))
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
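# A quick demonstration of the checkpoint-link regex defined above, run on a
# synthetic docstring fragment; the model name is only an example.
import re

checkpoint_re = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
print(checkpoint_re.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]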
| 719
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda x , y: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _lowerCAmelCase ( _snake_case ):
_UpperCAmelCase = """cvt"""
def __init__( self : Optional[Any] , __snake_case : Any=3 , __snake_case : List[str]=[7, 3, 3] , __snake_case : List[Any]=[4, 2, 2] , __snake_case : Optional[Any]=[2, 1, 1] , __snake_case : int=[64, 192, 384] , __snake_case : Union[str, Any]=[1, 3, 6] , __snake_case : int=[1, 2, 10] , __snake_case : str=[4.0, 4.0, 4.0] , __snake_case : List[Any]=[0.0, 0.0, 0.0] , __snake_case : Optional[Any]=[0.0, 0.0, 0.0] , __snake_case : Tuple=[0.0, 0.0, 0.1] , __snake_case : Any=[True, True, True] , __snake_case : Optional[Any]=[False, False, True] , __snake_case : List[Any]=["dw_bn", "dw_bn", "dw_bn"] , __snake_case : Optional[Any]=[3, 3, 3] , __snake_case : List[str]=[1, 1, 1] , __snake_case : Optional[int]=[2, 2, 2] , __snake_case : Any=[1, 1, 1] , __snake_case : Tuple=[1, 1, 1] , __snake_case : int=0.0_2 , __snake_case : Union[str, Any]=1e-1_2 , **__snake_case : str , ):
super().__init__(**__snake_case )
lowerCamelCase :Optional[Any] = num_channels
lowerCamelCase :str = patch_sizes
lowerCamelCase :Union[str, Any] = patch_stride
lowerCamelCase :Tuple = patch_padding
lowerCamelCase :Tuple = embed_dim
lowerCamelCase :Optional[Any] = num_heads
lowerCamelCase :Optional[int] = depth
lowerCamelCase :List[str] = mlp_ratio
lowerCamelCase :str = attention_drop_rate
lowerCamelCase :Optional[int] = drop_rate
lowerCamelCase :str = drop_path_rate
lowerCamelCase :Dict = qkv_bias
lowerCamelCase :Union[str, Any] = cls_token
lowerCamelCase :Any = qkv_projection_method
lowerCamelCase :Union[str, Any] = kernel_qkv
lowerCamelCase :List[str] = padding_kv
lowerCamelCase :Tuple = stride_kv
lowerCamelCase :List[Any] = padding_q
lowerCamelCase :Dict = stride_q
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :int = layer_norm_eps
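# A minimal usage sketch for the configuration defined above (published as
# CvtConfig in transformers); the overridden values are illustrative.
from transformers import CvtConfig

config = CvtConfig(num_channels=3, embed_dim=[64, 192, 384], num_heads=[1, 3, 6])
print(config.depth, config.mlp_ratio)  # per-stage depths and MLP ratios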
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( xmod_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fc1.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fc1.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fc2.weight
lowerCamelCase :int = xmod_layer.fc2.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fc1.weight
lowerCamelCase :List[Any] = from_adapter.fc1.bias
lowerCamelCase :Dict = from_adapter.fc2.weight
lowerCamelCase :Optional[Any] = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
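# A standalone sketch of the verification step above: compare two model
# outputs element-wise under an absolute tolerance. The tensors are dummies.
import torch

our_output = torch.tensor([[0.1, 0.2, 0.3]])
their_output = torch.tensor([[0.1, 0.2, 0.3 + 1e-5]])
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-5 here
assert torch.allclose(our_output, their_output, atol=1e-3)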
| 49
| 0
|
from __future__ import annotations
def _lowerCamelCase ( nums : list[int] , target : int):
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = len(nums) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCamelCase :Optional[Any] = i + 1
else:
lowerCamelCase :Any = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
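# The two-pointer scan above assumes `nums` is sorted. For unsorted input, a
# one-pass hash map finds the same index pair in O(n); this alternative is a
# sketch, not part of the original snippet.
def two_sum_hash(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for j, value in enumerate(nums):
        i = seen.get(target - value)
        if i is not None:
            return [i, j]
        seen[value] = j
    return []

assert two_sum_hash([11, 2, 15, 7], 9) == [1, 3]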
| 721
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
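# For illustration, the dynamic-axes mappings the `inputs` property above
# returns, written out as plain dicts: one for multiple-choice tasks and one
# for every other task.
from collections import OrderedDict

multiple_choice_axes = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
default_axes = {0: '''batch''', 1: '''sequence'''}
onnx_inputs = OrderedDict([('''input_ids''', default_axes), ('''attention_mask''', default_axes)])
print(onnx_inputs)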
| 49
| 0
|
import re
import string
import numpy as np
import datasets
A__ = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
A__ = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
A__ = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def snake_case ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def snake_case ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : Tuple=None , __snake_case : str=False , __snake_case : List[str]=False , __snake_case : List[str]=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCamelCase :List[str] = np.array([re.sub(_lowerCamelCase , '''''' , _lowerCamelCase ) for x in predictions] )
lowerCamelCase :Optional[Any] = np.array([re.sub(_lowerCamelCase , '''''' , _lowerCamelCase ) for x in references] )
else:
lowerCamelCase :Dict = np.asarray(_lowerCamelCase )
lowerCamelCase :Optional[Any] = np.asarray(_lowerCamelCase )
if ignore_case:
lowerCamelCase :Dict = np.char.lower(_lowerCamelCase )
lowerCamelCase :Union[str, Any] = np.char.lower(_lowerCamelCase )
if ignore_punctuation:
lowerCamelCase :List[Any] = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
lowerCamelCase :Any = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
lowerCamelCase :Dict = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
if ignore_numbers:
lowerCamelCase :Optional[Any] = string.digits.maketrans('''''' , '''''' , string.digits )
lowerCamelCase :str = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
lowerCamelCase :Tuple = np.char.translate(_lowerCamelCase , table=_lowerCamelCase )
lowerCamelCase :List[str] = predictions == references
return {"exact_match": np.mean(_lowerCamelCase ) * 100}
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
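# A brief sketch of the sequence-pair layout the tests above exercise
# ([CLS] A [SEP] B [SEP], with token_type_ids 0 for the first segment and 1
# for the second). The token ids are placeholders, not the real vocabulary.
def build_pair(ids_a, ids_b, cls_id=1, sep_id=2):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids

assert build_pair([10, 11], [20])[1] == [0, 0, 0, 0, 1, 1]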
| 49
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A__ = random.Random()
def _lowerCamelCase ( shape : Optional[Any] , scale : Optional[Any]=1.0 , rng : List[Any]=None , name : Union[str, Any]=None):
if rng is None:
lowerCamelCase :Any = global_rng
lowerCamelCase :List[str] = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self : List[Any] , __snake_case : Dict , __snake_case : Optional[Any]=7 , __snake_case : Any=400 , __snake_case : Tuple=2000 , __snake_case : Optional[Any]=1 , __snake_case : Tuple=0.0 , __snake_case : int=16000 , __snake_case : List[str]=True , __snake_case : str=80 , __snake_case : Union[str, Any]=16 , __snake_case : str=64 , __snake_case : Optional[Any]="hann_window" , __snake_case : Any=80 , __snake_case : int=7600 , __snake_case : Optional[int]=1e-1_0 , __snake_case : int=True , ):
lowerCamelCase :int = parent
lowerCamelCase :Dict = batch_size
lowerCamelCase :List[Any] = min_seq_length
lowerCamelCase :List[Any] = max_seq_length
lowerCamelCase :Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase :Optional[Any] = feature_size
lowerCamelCase :Optional[Any] = padding_value
lowerCamelCase :Union[str, Any] = sampling_rate
lowerCamelCase :Optional[Any] = do_normalize
lowerCamelCase :Tuple = num_mel_bins
lowerCamelCase :List[str] = hop_length
lowerCamelCase :Any = win_length
lowerCamelCase :Union[str, Any] = win_function
lowerCamelCase :List[str] = fmin
lowerCamelCase :Union[str, Any] = fmax
lowerCamelCase :List[Any] = mel_floor
lowerCamelCase :Optional[int] = return_attention_mask
def snake_case ( self : List[str] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def snake_case ( self : List[Any] , __snake_case : int=False , __snake_case : Tuple=False ):
def _flatten(list_of_lists : Dict ):
return list(itertools.chain(*list_of_lists ) )
if equal_length:
lowerCamelCase :Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase :Optional[Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase :Dict = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
def snake_case ( self : int , __snake_case : Tuple=False , __snake_case : Union[str, Any]=False ):
if equal_length:
lowerCamelCase :Union[str, Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase :Optional[Any] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase :Tuple = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class _lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
_UpperCAmelCase = SpeechTaFeatureExtractor
def snake_case ( self : str ):
lowerCamelCase :Tuple = SpeechTaFeatureExtractionTester(self )
def snake_case ( self : Union[str, Any] , __snake_case : Union[str, Any] ):
self.assertTrue(np.all(np.mean(__lowerCamelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCamelCase , axis=0 ) - 1 ) < 1e-3 ) )
def snake_case ( self : Any ):
lowerCamelCase :Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase :Optional[int] = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase :Dict = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
lowerCamelCase :Any = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test batched
lowerCamelCase :int = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
lowerCamelCase :str = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def snake_case ( self : int ):
lowerCamelCase :str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase :Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase :List[str] = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCamelCase :Optional[Any] = [None, 1600, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase :Any = feat_extract(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors='''np''' )
lowerCamelCase :Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase :List[str] = range(800 , 1400 , 200 )
lowerCamelCase :Optional[Any] = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase :Union[str, Any] = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCamelCase :int = [None, 1600, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase :str = feat_extract(__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase )
lowerCamelCase :Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase :Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase :Optional[int] = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
lowerCamelCase :List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase :List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase :Union[str, Any] = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
lowerCamelCase :Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowerCamelCase :Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase :Optional[int] = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
lowerCamelCase :Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase :Union[str, Any] = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase :Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase :str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase :int = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase :Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase :Optional[Any] = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase :List[str] = feature_extractor(audio_target=__lowerCamelCase , padding=__lowerCamelCase , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase :List[str] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
lowerCamelCase :Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test batched
lowerCamelCase :Dict = feature_extractor(__lowerCamelCase , return_tensors='''np''' ).input_values
lowerCamelCase :List[Any] = feature_extractor(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase :Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase :Optional[int] = np.asarray(__lowerCamelCase )
lowerCamelCase :Dict = feature_extractor(__lowerCamelCase , return_tensors='''np''' ).input_values
lowerCamelCase :Dict = feature_extractor(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def snake_case ( self : int ):
lowerCamelCase :Tuple = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :Optional[int] = feat_extract.model_input_names[0]
lowerCamelCase :Dict = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__lowerCamelCase ) == len(__lowerCamelCase ) for x, y in zip(__lowerCamelCase , processed_features[input_name] ) ) )
lowerCamelCase :str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCamelCase )
lowerCamelCase :List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowerCamelCase :int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase :List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def snake_case ( self : List[Any] ):
lowerCamelCase :List[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCamelCase )
lowerCamelCase :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :int = feat_extract.model_input_names[0]
lowerCamelCase :Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowerCamelCase :Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase :Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :List[str] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase :Optional[int] = feat_extract.model_input_names[0]
lowerCamelCase :Union[str, Any] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :Optional[int] = feat_extract.num_mel_bins # hack!
lowerCamelCase :List[str] = feat_extract.pad(__lowerCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
lowerCamelCase :Any = feat_extract.pad(__lowerCamelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.feat_extract_dict
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Optional[Any] = self.feature_extraction_class(**__lowerCamelCase )
lowerCamelCase :int = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase :Optional[int] = [len(__lowerCamelCase ) for x in speech_inputs]
lowerCamelCase :Union[str, Any] = feat_extract.model_input_names[0]
lowerCamelCase :Tuple = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :str = feat_extract.num_mel_bins # hack!
lowerCamelCase :List[Any] = feat_extract.pad(__lowerCamelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , __lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __lowerCamelCase )
def snake_case ( self : Any ):
lowerCamelCase :List[Any] = self.feat_extract_dict
lowerCamelCase :Any = True
lowerCamelCase :List[str] = self.feature_extraction_class(**__lowerCamelCase )
lowerCamelCase :Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase :Tuple = [len(__lowerCamelCase ) for x in speech_inputs]
lowerCamelCase :str = feat_extract.model_input_names[0]
lowerCamelCase :int = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :Optional[int] = min(__lowerCamelCase )
lowerCamelCase :Dict = feat_extract.num_mel_bins # hack!
lowerCamelCase :Union[str, Any] = feat_extract.pad(
__lowerCamelCase , padding='''max_length''' , max_length=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , __lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def snake_case ( self : Dict ):
        # fmt: off
        lowerCamelCase :Tuple = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
lowerCamelCase :Dict = self._load_datasamples(1 )
lowerCamelCase :Any = SpeechTaFeatureExtractor()
lowerCamelCase :Dict = feature_extractor(__lowerCamelCase , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , __lowerCamelCase , atol=1e-6 ) )
def snake_case ( self : Optional[int] ):
        # fmt: off
        lowerCamelCase :Union[str, Any] = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
lowerCamelCase :Tuple = self._load_datasamples(1 )
lowerCamelCase :Optional[Any] = SpeechTaFeatureExtractor()
lowerCamelCase :Any = feature_extractor(audio_target=__lowerCamelCase , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __lowerCamelCase , atol=1e-4 ) )
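# A minimal sketch (an assumption, not part of the original test file) of what
# the `_check_zero_mean_unit_variance` helper used throughout these tests
# asserts: after normalization each sample should have mean close to 0 and
# variance close to 1.
import numpy as np


def check_zero_mean_unit_variance(values: np.ndarray, epsilon: float = 1e-3) -> None:
    # mean should be close to zero and variance close to one, per feature
    assert np.all(np.abs(np.mean(values, axis=0)) < epsilon)
    assert np.all(np.abs(np.var(values, axis=0) - 1) < epsilon)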
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
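# The command these tests assemble and run, as a shell sketch (config path and
# flags mirror the setup in `setUpClass` above; the tmp dir is illustrative):
#
#     accelerate launch --config_file default_config.yml \
#         examples/by_feature/checkpointing.py \
#         --checkpointing_steps epoch --output_dir <tmpdir>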
| 49
| 0
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
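# A short usage sketch (hypothetical data, not part of the original module):
# the sort happens in place and the function returns None.
def _demo_slowsort() -> None:
    data = [5, 2, 9, 1]
    slowsort(data)
    assert data == [1, 2, 5, 9]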
if __name__ == "__main__":
from doctest import testmod
testmod()
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 49
| 0
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : int , ):
lowerCamelCase :Tuple = parent
lowerCamelCase :List[str] = 13
lowerCamelCase :Tuple = 7
lowerCamelCase :List[Any] = 30
lowerCamelCase :Optional[int] = self.seq_length + self.mem_len
lowerCamelCase :List[str] = 15
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :Dict = True
lowerCamelCase :List[Any] = 99
lowerCamelCase :List[Any] = [10, 50, 80]
lowerCamelCase :int = 32
lowerCamelCase :Tuple = 32
lowerCamelCase :List[Any] = 4
lowerCamelCase :Any = 8
lowerCamelCase :str = 128
lowerCamelCase :Any = 2
lowerCamelCase :List[str] = 2
lowerCamelCase :Dict = None
lowerCamelCase :List[Any] = 1
lowerCamelCase :List[str] = 0
lowerCamelCase :str = 3
lowerCamelCase :Union[str, Any] = self.vocab_size - 1
lowerCamelCase :str = 0.0_1
def snake_case ( self : Tuple ):
lowerCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase :Dict = None
if self.use_labels:
lowerCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase :str = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self : Any ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :List[Any] = TFTransfoXLModel(lowercase_ )
lowerCamelCase :Union[str, Any] = model(lowercase_ ).to_tuple()
lowerCamelCase :Optional[int] = {"input_ids": input_ids_a, "mems": mems_a}
lowerCamelCase :Dict = model(lowercase_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self : Dict , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :str = TFTransfoXLLMHeadModel(lowercase_ )
lowerCamelCase :int = model(lowercase_ ).to_tuple()
lowerCamelCase :List[str] = {"input_ids": input_ids_a, "labels": lm_labels}
lowerCamelCase :List[Any] = model(lowercase_ ).to_tuple()
lowerCamelCase :str = model([input_ids_a, mems_a] ).to_tuple()
lowerCamelCase :List[Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
lowerCamelCase :Dict = model(lowercase_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Any , __snake_case : Any ):
lowerCamelCase :Tuple = TFTransfoXLForSequenceClassification(lowercase_ )
lowerCamelCase :Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : str ):
lowerCamelCase :Any = self.prepare_config_and_inputs()
(lowerCamelCase) :Union[str, Any] = config_and_inputs
lowerCamelCase :int = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( snake_case__ , snake_case__ , unittest.TestCase ):
_UpperCAmelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCAmelCase = () if is_tf_available() else ()
_UpperCAmelCase = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Any ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self : Any ):
lowerCamelCase :int = TFTransfoXLModelTester(self )
lowerCamelCase :Any = ConfigTester(self , config_class=lowercase_ , d_embed=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
def snake_case ( self : Any ):
self.model_tester.set_seed()
lowerCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowercase_ )
def snake_case ( self : str ):
self.model_tester.set_seed()
lowerCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_ )
def snake_case ( self : int ):
lowerCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_ )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :str = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCamelCase :Dict = model_class(lowercase_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCamelCase :Union[str, Any] = model.get_output_embeddings()
assert isinstance(lowercase_ , tf.keras.layers.Layer )
lowerCamelCase :Union[str, Any] = model.get_bias()
assert name is None
else:
lowerCamelCase :Dict = model.get_output_embeddings()
assert x is None
lowerCamelCase :str = model.get_bias()
assert name is None
def snake_case ( self : Optional[int] ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def snake_case ( self : int ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :int = TFTransfoXLModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def snake_case ( self : Dict ):
pass
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowerCamelCase :Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCamelCase :Union[str, Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCamelCase :Dict = model.generate(lowercase_ , max_length=200 , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
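# The memory-carrying call pattern the checks above exercise, as a sketch
# (upstream TF Transfo-XL API; variable names are illustrative): a first
# forward pass returns `mems`, which the next segment feeds back in so
# attention can reach beyond the current window.
#
#     outputs = model(input_ids_segment_1)
#     mems = outputs.mems
#     outputs = model(input_ids_segment_2, mems=mems)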
| 703
|
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
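# Why the logarithm works (sketch; this helper is illustrative and not part of
# the original solution): log10 is strictly increasing, so comparing
# x * log10(a) orders a**x without materialising enormous integers, e.g.
# 3**700 > 2**1000 because 700 * log10(3) ~ 334.0 > 1000 * log10(2) ~ 301.0.
def _compare_powers(a: int, x: int, b: int, y: int) -> bool:
    """Return True when a**x > b**y, using logarithms only."""
    return x * log10(a) > y * log10(b)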
if __name__ == "__main__":
print(solution())
| 49
| 0
|
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
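# Usage sketch (hypothetical data):
def _demo_mean() -> None:
    assert mean([1, 2, 3, 4]) == 2.5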
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
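# Usage sketch (hypothetical grid): the cheapest top-left to bottom-right path
# through the classic 3x3 example is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
def _demo_min_path_sum() -> None:
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7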
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
A__ = logging.get_logger(__name__)
@dataclass
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Union[str, Any]=False , __snake_case : Dict=False , __snake_case : Tuple=6.0 , __snake_case : Union[str, Any]=None , __snake_case : Optional[int]=False , __snake_case : Optional[int]=False , __snake_case : Dict=None , __snake_case : Tuple="fp4" , __snake_case : Union[str, Any]=False , **__snake_case : Union[str, Any] , ):
lowerCamelCase :Tuple = load_in_abit
lowerCamelCase :Dict = load_in_abit
lowerCamelCase :List[Any] = llm_inta_threshold
lowerCamelCase :Tuple = llm_inta_skip_modules
lowerCamelCase :List[Any] = llm_inta_enable_fpaa_cpu_offload
lowerCamelCase :Optional[Any] = llm_inta_has_fpaa_weight
lowerCamelCase :List[Any] = bnb_abit_quant_type
lowerCamelCase :Any = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowerCamelCase :List[Any] = torch.floataa
elif isinstance(__snake_case , __snake_case ):
lowerCamelCase :Optional[int] = getattr(__snake_case , __snake_case )
elif isinstance(__snake_case , torch.dtype ):
lowerCamelCase :List[str] = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def snake_case ( self : Union[str, Any] ):
if not isinstance(self.llm_inta_threshold , __snake_case ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __snake_case ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __snake_case ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , __snake_case ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , __snake_case ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , __snake_case ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def snake_case ( self : Optional[Any] ):
return self.load_in_abit or self.load_in_abit
def snake_case ( self : Tuple ):
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def snake_case ( cls : Union[str, Any] , __snake_case : str , __snake_case : List[Any] , **__snake_case : int ):
lowerCamelCase :Any = cls(**__snake_case )
lowerCamelCase :Tuple = []
for key, value in kwargs.items():
if hasattr(__snake_case , __snake_case ):
setattr(__snake_case , __snake_case , __snake_case )
to_remove.append(__snake_case )
for key in to_remove:
kwargs.pop(__snake_case , __snake_case )
if return_unused_kwargs:
return config, kwargs
else:
return config
def snake_case ( self : List[Any] , __snake_case : Union[str, os.PathLike] ):
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as writer:
lowerCamelCase :int = self.to_dict()
lowerCamelCase :Dict = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + "\n"
writer.write(__snake_case )
def snake_case ( self : Tuple ):
lowerCamelCase :int = copy.deepcopy(self.__dict__ )
lowerCamelCase :Dict = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self : Tuple ):
return F"{self.__class__.__name__} {self.to_json_string()}"
def snake_case ( self : Any , __snake_case : bool = True ):
if use_diff is True:
lowerCamelCase :Optional[Any] = self.to_diff_dict()
else:
lowerCamelCase :Tuple = self.to_dict()
return json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + "\n"
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :List[str] = self.to_dict()
# get the default config dict
lowerCamelCase :Optional[Any] = BitsAndBytesConfig().to_dict()
lowerCamelCase :Optional[int] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowerCamelCase :List[Any] = value
return serializable_config_dict
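# Typical construction of this config, as a sketch (assuming the upstream
# un-obfuscated `BitsAndBytesConfig` from `transformers`, which this class
# corresponds to; the model id is illustrative):
#
#     import torch
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quant_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.bfloat16,
#         bnb_4bit_use_double_quant=True,
#     )
#     model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quant_config)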
| 705
|
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
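# What the inner loop enumerates, as a sketch: for current side length j, the
# next spiral ring's diagonal corners short of the odd square start at
# j*j + j + 1 and step by j + 1; e.g. for j = 3 these are 13, 17 and 21
# (25 = 5**2 is excluded, since odd squares are never prime).
def _demo_ring_corners() -> None:
    assert list(range(3 * 3 + 3 + 1, 5 * 5, 3 + 1)) == [13, 17, 21]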
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
A__ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Dict , **__snake_case : List[str] ):
super().__init__(**__UpperCamelCase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , '''vision''' )
self.check_model_type(__UpperCamelCase )
def __call__( self : List[str] , __snake_case : Union[str, "Image.Image", List[Dict[str, Any]]] , __snake_case : Union[str, List[str]] = None , **__snake_case : List[Any] , ):
if "text_queries" in kwargs:
lowerCamelCase :Any = kwargs.pop('''text_queries''' )
if isinstance(__UpperCamelCase , (str, Image.Image) ):
lowerCamelCase :List[Any] = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
lowerCamelCase :Tuple = image
lowerCamelCase :Dict = super().__call__(__UpperCamelCase , **__UpperCamelCase )
return results
def snake_case ( self : Tuple , **__snake_case : Union[str, Any] ):
lowerCamelCase :Optional[int] = {}
if "threshold" in kwargs:
lowerCamelCase :Tuple = kwargs['''threshold''']
if "top_k" in kwargs:
lowerCamelCase :List[Any] = kwargs['''top_k''']
return {}, {}, postprocess_params
def snake_case ( self : List[Any] , __snake_case : str ):
lowerCamelCase :Any = load_image(inputs['''image'''] )
lowerCamelCase :Dict = inputs['''candidate_labels''']
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase :Any = candidate_labels.split(''',''' )
lowerCamelCase :str = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(__UpperCamelCase ):
lowerCamelCase :int = self.tokenizer(__UpperCamelCase , return_tensors=self.framework )
lowerCamelCase :Union[str, Any] = self.image_processor(__UpperCamelCase , return_tensors=self.framework )
yield {
"is_last": i == len(__UpperCamelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def snake_case ( self : Dict , __snake_case : str ):
lowerCamelCase :int = model_inputs.pop('''target_size''' )
lowerCamelCase :Any = model_inputs.pop('''candidate_label''' )
lowerCamelCase :int = model_inputs.pop('''is_last''' )
lowerCamelCase :Any = self.model(**__UpperCamelCase )
lowerCamelCase :int = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def snake_case ( self : Any , __snake_case : Dict , __snake_case : Union[str, Any]=0.1 , __snake_case : int=None ):
lowerCamelCase :str = []
for model_output in model_outputs:
lowerCamelCase :List[Any] = model_output['''candidate_label''']
lowerCamelCase :str = BaseModelOutput(__UpperCamelCase )
lowerCamelCase :Union[str, Any] = self.image_processor.post_process_object_detection(
outputs=__UpperCamelCase , threshold=__UpperCamelCase , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
lowerCamelCase :str = outputs['''scores'''][index].item()
lowerCamelCase :List[str] = self._get_bounding_box(outputs['''boxes'''][index][0] )
lowerCamelCase :Optional[Any] = {'''score''': score, '''label''': label, '''box''': box}
results.append(__UpperCamelCase )
        lowerCamelCase :List[str] = sorted(__UpperCamelCase , key=lambda x : x["score"] , reverse=__UpperCamelCase )
if top_k:
lowerCamelCase :Optional[int] = results[:top_k]
return results
def snake_case ( self : Optional[Any] , __snake_case : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :List[Any] = box.int().tolist()
lowerCamelCase :Any = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
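# Typical end-user call of this pipeline, as a sketch (assuming the upstream
# `transformers.pipeline` factory; the OWL-ViT checkpoint and labels are
# illustrative):
#
#     from transformers import pipeline
#
#     detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
#     predictions = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]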
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
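# The streaming pattern these tests exercise, as a sketch (documented upstream
# usage; the checkpoint and prompt are illustrative): generation runs in a
# background thread while the main thread consumes decoded text chunks.
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#     tok = AutoTokenizer.from_pretrained("distilgpt2")
#     model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#     inputs = tok("Hello", return_tensors="pt")
#     streamer = TextIteratorStreamer(tok)
#     Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#     for text_chunk in streamer:
#         print(text_chunk, end="")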
| 49
| 0
|
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
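# Closed-form check (standard identities, not part of the original file):
# sum 1..n = n(n+1)/2 and sum of squares 1..n = n(n+1)(2n+1)/6, so the answer
# is (n(n+1)/2)**2 - n(n+1)(2n+1)/6; for n = 100 that is 25164150.
def _solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares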
if __name__ == "__main__":
print(F'{solution() = }')
| 707
|
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
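# Usage sketch: a prime has exactly one prime factor, so the parity rule gives
# -1. Note this variant never returns 0, unlike the full Moebius function,
# because it does not test for squared factors.
def _demo_mobius_of_prime() -> None:
    assert mobius(7) == -1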
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config , base_model)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config , base_model)
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image , return_tensors='''pt''').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('''Predicted class:''' , logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(F"ybelkada/{vit_name}")
        processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 0
|
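# Editor's sketch (added; not part of the original conversion script): the
# read_in_q_k_v helper above slices timm's fused qkv projection into separate
# query/key/value tensors. The same slicing pattern, shown standalone with a
# toy hidden size so the row layout [q; k; v] is visible:
import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]
query = qkv_weight[:hidden_size, :]
key = qkv_weight[hidden_size : hidden_size * 2, :]
value = qkv_weight[-hidden_size:, :]
# re-concatenating the three slices recovers the fused matrix exactly
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)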
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F'Number {x} is at index {res}')
| 709
|
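# Editor's example (added): jump search assumes a sorted list and makes
# O(sqrt(n)) comparisons. The check below assumes the jump_search function
# above is in scope and cross-validates it against the standard library.
import bisect

data = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55]
assert jump_search(data, 21) == bisect.bisect_left(data, 21)  # both return index 7
assert jump_search(data, 4) == -1  # 4 is absent, so jump search reports -1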
def solution(n: int = 4_00_00_00) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 0
|
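# Editor's sketch (added): an equivalent way to sum even Fibonacci terms
# without materializing the whole sequence. Every third Fibonacci number is
# even, and the even terms satisfy e_next = 4 * e + e_prev.
def sum_even_fibonacci(limit: int = 4_00_00_00) -> int:
    e_prev, e = 0, 2
    total = 0
    while e <= limit:
        total += e
        e_prev, e = e, 4 * e + e_prev
    return total

assert sum_even_fibonacci() == 4613732  # agrees with the sieve-style solution above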
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_flax_xlm_roberta_base(self ):
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['''last_hidden_state''']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
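# Editor's sketch (added; simplified stand-in for transformers' _LazyModule):
# the import structure above defers heavy imports until first attribute
# access, which a module-level __getattr__ (PEP 562) can express directly.
# The module name "heavy_module" is a hypothetical placeholder.
import importlib

_import_map = {"HeavyClass": "heavy_module"}

def __getattr__(name):
    if name in _import_map:
        module = importlib.import_module(_import_map[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")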
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__(self , accelerate_config_file , *args ):
        self._accelerate_config_file = accelerate_config_file
    def run(self ):
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors
            safetensors_version = F"{safetensors.__version__} but is ignored because the installed PyTorch version is too old."
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F"\t{accelerate_config}"
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': F"{safetensors_version}",
            '''Accelerate version''': F"{accelerate_version}",
            '''Accelerate config''': F"{accelerate_config_str}",
            '''PyTorch version (GPU?)''': F"{pt_version} ({pt_cuda_available})",
            '''Tensorflow version (GPU?)''': F"{tf_version} ({tf_cuda_available})",
            '''Flax version (CPU?/GPU?/TPU?)''': F"{flax_version} ({jax_backend})",
            '''Jax version''': F"{jax_version}",
            '''JaxLib version''': F"{jaxlib_version}",
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict(d ):
        return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 711
|
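# Editor's sketch (added): the "report a version or 'not installed'" pattern
# used by the env command above, generalized with importlib.metadata from the
# standard library (Python 3.8+).
from importlib import metadata

def package_version(name: str) -> str:
    try:
        return metadata.version(name)
    except metadata.PackageNotFoundError:
        return "not installed"

# e.g. package_version("torch") returns the installed version string,
# or "not installed" when the package is absent.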
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self , input_array: numpy.ndarray , output_array: numpy.ndarray ):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward(self ):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self ):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self , output: numpy.ndarray , iterations: int , give_loss: bool ):
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(F"Iteration {iteration} Loss: {loss}" )
    def predict(self , input_arr: numpy.ndarray ):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray ):
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray ):
    # expects value to already be a sigmoid activation s, since d/dx sigmoid(x) = s * (1 - s)
    return (value) * (1 - (value))
def example():
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64))
if __name__ == "__main__":
example()
| 49
| 0
|
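# Editor's check (added): sigmoid_derivative above takes the *activation*
# value s = sigmoid(x), because d/dx sigmoid(x) = s * (1 - s). A quick
# numerical verification of that identity:
import numpy

x = 0.3
s = 1 / (1 + numpy.exp(-x))
analytic = s * (1 - s)
eps = 1e-6
numeric = ((1 / (1 + numpy.exp(-(x + eps)))) - (1 / (1 + numpy.exp(-(x - eps))))) / (2 * eps)
assert abs(analytic - numeric) < 1e-8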
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary) , len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len) , b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
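# Editor's example (added): for non-negative integers, the string-based
# binary_and above agrees with Python's built-in bitwise operator up to zero
# padding. Assumes the binary_and function above is in scope.
a, b = 25, 37
assert int(binary_and(a, b), 2) == a & b  # 0b011001 & 0b100101 == 0b000001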
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
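# Editor's example (added): dp[i][j] in the function above is True when the
# first i characters of a can produce the first j characters of b by
# upper-casing some lowercase letters and deleting the remaining lowercase
# ones. Assumes the abbr function above is in scope.
assert abbr("daBcd", "ABC") is True   # delete d, uppercase a, keep B, uppercase c, delete d
assert abbr("dBcd", "ABC") is False   # 'B' can neither match 'A' nor be deleted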
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1)
    arr = np.array(pil_image.convert('''RGB'''))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps(self , num_inference_steps , strength , device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}" )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    F"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
                    F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
def snake_case ( self : str , __snake_case : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowerCamelCase :Optional[int] = torch.device(F"cuda:{gpu_id}" )
lowerCamelCase :Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def snake_case ( self : List[Any] , __snake_case : Union[str, Any]=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowerCamelCase :Optional[Any] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase :Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase , lowerCamelCase :Optional[Any] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
lowerCamelCase :int = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self ):
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self : Optional[int] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Union[str, Any] = 512 , __snake_case : Optional[int] = 512 , __snake_case : List[str] = 100 , __snake_case : Optional[int] = 4.0 , __snake_case : Any = 0.3 , __snake_case : Union[str, Any] = 1 , __snake_case : Any = None , __snake_case : str = "pil" , __snake_case : Optional[int] = True , ):
lowerCamelCase :Dict = self._execution_device
lowerCamelCase :int = guidance_scale > 1.0
if isinstance(_A , _A ):
lowerCamelCase :Union[str, Any] = torch.cat(_A , dim=0 )
lowerCamelCase :str = image_embeds.shape[0]
if isinstance(_A , _A ):
lowerCamelCase :Dict = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase :List[Any] = image_embeds.repeat_interleave(_A , dim=0 )
lowerCamelCase :Dict = negative_image_embeds.repeat_interleave(_A , dim=0 )
lowerCamelCase :Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
lowerCamelCase :Tuple = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowerCamelCase :List[str] = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
lowerCamelCase :Tuple = image.to(dtype=image_embeds.dtype , device=_A )
lowerCamelCase :Dict = self.movq.encode(_A )['''latents''']
lowerCamelCase :Dict = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
lowerCamelCase , lowerCamelCase :Tuple = self.get_timesteps(_A , _A , _A )
lowerCamelCase :List[str] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowerCamelCase , lowerCamelCase :Tuple = downscale_height_and_width(_A , _A , self.movq_scale_factor )
lowerCamelCase :int = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase :List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase :Any = {'''image_embeds''': image_embeds}
lowerCamelCase :List[Any] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase :Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase , lowerCamelCase :str = noise_pred.chunk(2 )
lowerCamelCase , lowerCamelCase :List[str] = variance_pred.chunk(2 )
lowerCamelCase :str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase :Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase , lowerCamelCase :Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase :Any = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
lowerCamelCase :Dict = self.movq.decode(_A , force_not_quantize=_A )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowerCamelCase :int = image * 0.5 + 0.5
lowerCamelCase :Dict = image.clamp(0 , 1 )
lowerCamelCase :int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase :Union[str, Any] = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 713
|
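# Editor's sketch (added): the classifier-free guidance step inside the
# pipeline above runs the UNet on a (negative, positive) conditioned batch and
# blends the two noise predictions. Shown standalone with random stand-ins:
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 8, 8)  # stand-in for a batched UNet output
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == noise_pred_uncond.shape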
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        image_size=[30, 30] ,
        patch_size=2 ,
        num_channels=3 ,
        is_training=True ,
        use_labels=True ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        type_sequence_label_size=10 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        scope=None ,
        n_targets=8 ,
        num_detection_tokens=10 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size ):
                target = {}
                target['''class_labels'''] = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=torch_device )
                target['''boxes'''] = torch.rand(self.n_targets , 4 , device=torch_device )
                labels.append(target )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = YolosModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def create_and_check_for_object_detection(self , config , pixel_values , labels ):
        model = YolosForObjectDetection(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values=pixel_values )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        result = model(pixel_values=pixel_values , labels=labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    def prepare_config_and_inputs_for_common(self ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target['''class_labels'''] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target['''boxes'''] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict['''labels'''] = labels
        return inputs_dict
    def setUp(self ):
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self ):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_attention_outputs(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_object_detection(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self ):
        model = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values )
        # verify outputs
        expected_shape = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=torch_device , )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice_logits , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 ) )
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(torch_device )
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(torch_device )
        self.assertEqual(len(results['''scores'''] ) , 5 )
        self.assertTrue(torch.allclose(results['''scores'''] , expected_scores , atol=1e-4 ) )
        self.assertSequenceEqual(results['''labels'''].tolist() , expected_labels )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :] , expected_slice_boxes ) )
| 49
| 0
|
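# Editor's note (added): the expected sequence length used throughout the
# YOLOS tests above is num_patches + 1 (CLS token) + num_detection_tokens.
# For the tester defaults that arithmetic works out as follows:
image_size = (30, 30)
patch_size = 2
num_detection_tokens = 10
num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
expected_seq_len = num_patches + 1 + num_detection_tokens
assert expected_seq_len == 236  # 225 patches + 1 CLS + 10 detection tokens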
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''')
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''')
    decoded = ''''''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 714
|
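# Editor's example (added): the Baconian encode/decode above are inverses on
# lowercase letters and spaces, assuming both functions above are in scope.
assert encode("ab") == "AAAAAAAAAB"  # "AAAAA" + "AAAAB"
assert decode(encode("hello world")) == "hello world"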
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase ):
    def setUp(self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self ):
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def test_metric_cpu_multi(self ):
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def test_metric_gpu(self ):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self ):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 49
| 0
|
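# Editor's sketch (added, illustrative): the multi-GPU test above shells out
# to torchrun with one process per visible GPU; the same command can be built
# standalone. The script path below is a hypothetical placeholder.
import torch

num_gpus = torch.cuda.device_count()
cmd = ["torchrun", f"--nproc_per_node={num_gpus}", "scripts/test_metrics.py"]
# e.g. subprocess.run(cmd, check=True) would launch the distributed test.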
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang):
        return tok(strang , return_tensors='''pt''').input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens)
        print(F"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / F"{split}.source").open('''w''').write('''\n'''.join(packed_src))
        Path(save_path / F"{split}.target").open('''w''').write('''\n'''.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        shutil.copyfile(src_path , save_path / F"{split}.source")
        shutil.copyfile(tgt_path , save_path / F"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''--max_seq_len''' , type=int , default=128)
    parser.add_argument('''--data_dir''' , type=str)
    parser.add_argument('''--save_path''' , type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer , Path(args.data_dir) , args.max_seq_len , args.save_path)
if __name__ == "__main__":
packer_cli()
| 715
|
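# Editor's demo (added): the same greedy packing idea as pack_examples above,
# with a simple word-count length function standing in for a tokenizer so the
# example is self-contained.
def pack_lines(lines, max_words=8):
    packed, current = [], lines[0]
    for line in lines[1:]:
        candidate = current + " " + line
        if len(candidate.split()) > max_words:  # can't fit, finalize the chunk
            packed.append(current)
            current = line
        else:  # fits, keep accumulating
            current = candidate
    packed.append(current)
    return packed

assert pack_lines(["a b", "c d e", "f g h i", "j"], max_words=5) == ["a b c d e", "f g h i j"]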
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem ):
    root_marker = ''
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem ):
    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'
class GzipFileSystem(BaseCompressedFileFileSystem ):
    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'
class Lz4FileSystem(BaseCompressedFileFileSystem ):
    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'
class XzFileSystem(BaseCompressedFileFileSystem ):
    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'
class ZstdFileSystem(BaseCompressedFileFileSystem ):
    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'
    def __init__(self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__(self , file_ ):
                self._file = file_
            def __enter__(self ):
                self._file.__enter__()
                return self
            def __exit__(self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )
            def __iter__(self ):
                return iter(self._file )
            def __next__(self ):
                return next(self._file )
            def __getattr__(self , attr ):
                return getattr(self._file , attr )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
| 49
| 0
|
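# Editor's usage sketch (added): the filesystems above build on fsspec's
# transparent decompression; the plain-fsspec form of the same idea is shown
# below with a temporary file so the snippet is self-contained.
import gzip
import os
import tempfile

import fsspec

tmp_path = os.path.join(tempfile.mkdtemp(), "file.txt.gz")
with gzip.open(tmp_path, "wb") as f:
    f.write(b"hello")
# fsspec decompresses on the fly; the datasets classes above additionally
# register gzip://-style chained protocols on top of this mechanism.
with fsspec.open(tmp_path, mode="rb", compression="gzip") as f:
    assert f.read() == b"hello"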
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def snake_case ( self : Dict ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Union[str, Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Any = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : Optional[Any] , **__snake_case : List[str] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[int] , **__snake_case : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Union[str, Any] , __snake_case : List[str] ):
lowerCamelCase :Optional[Any] = '''lower newer'''
lowerCamelCase :Any = '''lower newer'''
return input_text, output_text
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase :int = '''lower newer'''
lowerCamelCase :Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case ) # , add_prefix_space=True)
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Union[str, Any] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__snake_case ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__snake_case ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def snake_case ( self : List[str] ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
lowerCamelCase :Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :List[str] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case ( self : List[Any] ):
lowerCamelCase :str = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''Encode this sequence.'''
lowerCamelCase :List[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCamelCase :Union[str, Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__snake_case , __snake_case )
lowerCamelCase :Dict = tokenizer.encode(__snake_case , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__snake_case , __snake_case )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCamelCase :Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__snake_case , __snake_case )
# Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def snake_case ( self : List[Any] ):
pass
def snake_case ( self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def snake_case ( self : Optional[Any] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
def snake_case ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def snake_case ( self : int , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def snake_case ( self : Dict , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
@require_torch
def snake_case ( self : Tuple ):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , batch )
            self.assertIn('''attention_mask''' , batch )
            self.assertNotIn('''labels''' , batch )
            self.assertNotIn('''decoder_attention_mask''' , batch )
@require_torch
def snake_case ( self : Union[str, Any] ):
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=True , truncation=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='''pt''' )
            targets = tokenizer(text_target=tgt_text , return_tensors='''pt''' )
            input_ids = inputs['''input_ids''']
            labels = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['''Summary of the text.''', '''Another summary.''']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output['''global_attention_mask'''] = [[0] * len(x) for x in encoded_output['''input_ids''']]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , expected_global_attention_mask )
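            # Behaviour verified above: tokenizer.pad extends the shorter
            # sequence's custom "global_attention_mask" with -1 at the padding
            # positions, exactly as the expected mask encodes.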
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 0
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
A__ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _lowerCamelCase ( a_ : str = "mumbai"):
lowerCamelCase :Optional[int] = BeautifulSoup(requests.get(url + location).content , '''html.parser''')
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''}):
lowerCamelCase :Optional[Any] = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
lowerCamelCase :Union[str, Any] = job.find('''span''' , {'''class''': '''company'''}).text.strip()
yield job_title, company_name
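# Minimal usage sketch (the output pair is hypothetical and depends on Indeed's
# live markup; if the site changes its HTML attributes, the selectors above
# will simply yield nothing):
# next(fetch_jobs("Delhi"))  # -> ("Mobile App Developer", "Some Company")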
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_layoutlmv2"""] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files.'} )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ):
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''validation'''] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
    norm_pix_loss: bool = field(
        default=True , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def collate_fn(examples):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , model_args , data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}")
logger.info(F"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split)
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('''You are instantiating a new config instance from scratch.''')
    if model_args.config_overrides is not None:
        logger.info(F"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(F"New config: {config}")
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
})
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''')
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase :Optional[int] = image_processor.size['''shortest_edge''']
else:
lowerCamelCase :Union[str, Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
lowerCamelCase :Union[str, Any] = Compose(
[
Lambda(lambda a_: img.convert('''RGB''') if img.mode != "RGB" else img),
RandomResizedCrop(snake_case_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std),
])
    def preprocess_images(examples):
        examples['''pixel_values'''] = [transforms(image) for image in examples[image_column_name]]
        return examples
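    # Sketch of the result: once set_transform(preprocess_images) is applied
    # below, ds["train"][0]["pixel_values"] is a (3, size, size) float tensor.
    # No labels are prepared because ViTMAE reconstructs masked patches from
    # the image itself.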
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('''--do_train requires a train dataset''')
        if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('''--do_eval requires a validation dataset''')
        if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
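    # Worked example of the linear scaling rule above (hypothetical values):
    # per-step batch 32 * gradient accumulation 4 * world size 2 = 256, so
    # absolute_lr = 1e-3 * 256 / 256 = 1e-3.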
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics)
        trainer.save_metrics('''train''' , train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics)
        trainer.save_metrics('''eval''' , metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 718
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
        outputs = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
        outputs = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
        outputs = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
        outputs = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 0
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase = TransfoXLTokenizer
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : List[Any] ):
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def snake_case ( self : Union[str, Any] , **kwargs ):
        kwargs['''lower_case'''] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def snake_case ( self : Tuple , __snake_case : Optional[Any] ):
lowerCamelCase :Optional[Any] = '''<unk> UNwanted , running'''
lowerCamelCase :Tuple = '''<unk> unwanted, running'''
return input_text, output_text
def snake_case ( self : Union[str, Any] ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def snake_case ( self : List[Any] ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def snake_case ( self : Union[str, Any] ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case ( self : str ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_target = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_target )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_target ) , text_in )
def snake_case ( self : List[str] ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
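        # Net effect verified above: move_added_token relocates "new1" to id 1
        # instead of copying it, so the vocabulary grows by exactly the two
        # added tokens while id 1 round-trips through encode/decode as "new1".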
| 719
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
    print('''-''' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(stack) , sep=''' | ''')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(stack) , sep=''' | ''')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(stack) , sep=''' | ''')
            stack.append(
                str(opr[x](int(a) , int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(stack) , sep=''' | ''' , )
    return int(stack[0])
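# Worked example (space-separated tokens, single left-to-right pass):
# solve(["5", "6", "9", "*", "+"]) prints the trace table and returns 59,
# since 6 * 9 = 54 is pushed first and then 5 + 54 = 59.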
if __name__ == "__main__":
    Postfix = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(subreddit: str , limit: int = 1 , age: str = "new" , wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = F"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        F"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
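# Sketch of the returned shape for the call below (field values hypothetical):
# {0: {"title": "...", "url": "https://reddit.com/...", "selftext": "..."}}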
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool):
    data_dir = Path('''data_bin''')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent) , checkpoint_file=Path(xmod_checkpoint_path).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(data_dir) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(xmod_checkpoint_path).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''')
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''')
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''')
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('''Lists of language adapters do not match.''')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape , their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(F"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3)
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
    if not success:
        raise Exception('''Something went wRoNg''')
    Path(pytorch_dump_folder_path).mkdir(parents=True , exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 0
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' , '''''')
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''')
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''')
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' , '''LayerNorm''')
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''')[0].split('''_''')[-1]
        orig_key = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' , '''attention.self''')
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' , '''attention''')
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' , '''self.query''')
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' , '''self.key''')
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' , '''self.value''')
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''')
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' , '''output.dense''')
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' , '''output.dense''')
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''')
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''')
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
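# Example trace of rename_key on a hypothetical checkpoint key:
# "model.transformer_0.mha.W_q.weight"
#   -> "yoso.encoder.layer.0.attention.self.query.weight"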
def convert_checkpoint_helper(max_position_embeddings , orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
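# On the "+ 2" offset above: it appears to mirror the RoBERTa-style convention
# of reserving position ids 0 and 1 (padding_idx + 1) so the converted
# embeddings line up with the checkpoint; this rationale is an assumption,
# not something the code itself asserts.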
def convert_yoso_checkpoint(checkpoint_path , yoso_config_file , pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''')['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(F"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
A__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ : Dict = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 721
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def snake_case ( self : Any ):
lowerCamelCase :List[str] = tempfile.mkdtemp()
lowerCamelCase :List[str] = 8
# DPR tok
lowerCamelCase :str = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase :Dict = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase :List[str] = os.path.join(__snake_case , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCamelCase :Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Any = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Tuple = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase :Dict = os.path.join(__snake_case , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Optional[int] = os.path.join(__snake_case , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : List[Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def snake_case ( self : Optional[Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def snake_case ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def snake_case ( self : Optional[int] ):
lowerCamelCase :Tuple = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
lowerCamelCase :Union[str, Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCamelCase :Union[str, Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__snake_case )
rag_tokenizer.save_pretrained(__snake_case )
lowerCamelCase :int = RagTokenizer.from_pretrained(__snake_case , config=__snake_case )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __snake_case )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __snake_case )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Optional[int] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
lowerCamelCase :Any = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCamelCase :Dict = tokenizer(__snake_case )
self.assertIsNotNone(__snake_case )
@slow
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
lowerCamelCase :List[Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCamelCase :str = tokenizer(__snake_case )
self.assertIsNotNone(__snake_case )
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 0
|
A__ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :str = 0
while number:
        # Speed is increased slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
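# Hedged check (added): chunking five digits at a time agrees with the
# digit-by-digit definition; for 123456 the two table lookups cover "23456" and
# "1", and 1 + 4 + 9 + 16 + 25 + 36 == 91 either way.
assert sum(int(c) ** 2 for c in "123456") == 91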
# Two chains are formed. One ends in 89; declaring its member 58 first gives
# the fewest iterations needed to resolve all remaining members. The other
# ends in 1 and contains only the single element 1. So 58 and 1 are declared
# at the start.
# The dictionary was changed to an array to speed up the solution.
A__ = [None] * 10_000_000
A__ = True
A__ = False
def _lowerCamelCase ( a_ : int):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase :List[Any] = chain(next_number(a_))
lowerCamelCase :List[Any] = number_chain
while number < 10_00_00_00:
lowerCamelCase :List[Any] = number_chain
number *= 10
return number_chain
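# Hedged aside (added): the while-loop above is valid because appending a zero
# never changes the digit-square sum (0 ** 2 == 0), so n, 10n, 100n, ... all
# belong to the same chain and can share one memoized result.
assert sum(int(c) ** 2 for c in "44") == sum(int(c) ** 2 for c in "4400")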
def _lowerCamelCase ( a_ : int = 10_00_00_00):
for i in range(1 , a_):
if CHAINS[i] is None:
chain(i + 1)
return CHAINS[:number].count(a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 0
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
A__ = False
try:
A__ = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class _lowerCAmelCase :
def __init__( self : int , __snake_case : str = None , __snake_case : list = [] ):
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :Any = choices
lowerCamelCase :List[Any] = prompt
if sys.platform == "win32":
lowerCamelCase :Optional[Any] = '''*'''
else:
lowerCamelCase :List[Any] = '''➔ '''
def snake_case ( self : int , __snake_case : Optional[Any] , __snake_case : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __snake_case )
else:
forceWrite(self.choices[index] , __snake_case )
def snake_case ( self : Union[str, Any] , __snake_case : int ):
if index == self.position:
forceWrite(F" {self.arrow_char} " )
self.write_choice(__snake_case )
else:
forceWrite(F" {self.choices[index]}" )
reset_cursor()
def snake_case ( self : Union[str, Any] , __snake_case : Direction , __snake_case : int = 1 ):
lowerCamelCase :int = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__snake_case )
move_cursor(__snake_case , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def snake_case ( self : Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def snake_case ( self : List[str] ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def snake_case ( self : List[str] ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def snake_case ( self : List[str] ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__snake_case )] for number in range(10 )] )
def snake_case ( self : str ):
lowerCamelCase :Dict = int(chr(self.current_selection ) )
lowerCamelCase :Union[str, Any] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __snake_case )
else:
return
else:
return
def snake_case ( self : int , __snake_case : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
lowerCamelCase :Tuple = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__snake_case )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
lowerCamelCase :List[Any] = int(builtins.input() )
except ValueError:
lowerCamelCase :List[Any] = default_choice
else:
lowerCamelCase :Optional[int] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(__snake_case , '''\n''' )
return choice
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
    # the negative image array should contain at least one truthy value
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
    # every entry of the generated Gaussian kernel should be truthy
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
    # every pixel of the loaded grayscale image should be truthy
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
    # Laplacian kernel that includes the diagonal neighbours
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
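# Hedged illustration (added; `_hidden` and the random tensor are hypothetical,
# not part of the conversion above): timm fuses the attention input projection
# into one (3 * hidden, hidden) matrix, and the loop above recovers q, k and v
# by slicing dim 0 in that order; concatenating the slices must round-trip.
_hidden = 4
_fused = torch.randn(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden], _fused[_hidden : 2 * _hidden], _fused[-_hidden:]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _fused)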
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
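# Hedged aside (added; self-contained, with its own import): the comparison
# above happens in log space because exp * log10(base) preserves the ordering
# of base ** exp while avoiding huge integers; for example, 2**10 < 3**7
# exactly when 10 * log10(2) < 7 * log10(3).
import math

assert (10 * math.log10(2) < 7 * math.log10(3)) == (2**10 < 3**7)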
if __name__ == "__main__":
print(solution())
| 49
| 0
|
'''simple docstring'''
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
        # Random initial weights are assigned; the first argument is the number
        # of nodes in the previous layer and the second argument is the number
        # of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
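# Hedged check (added; self-contained so it does not rely on the obfuscated
# function names above): for s = 1 / (1 + e**-x) the derivative is s * (1 - s);
# at x = 0, s = 0.5 and the slope is exactly 0.25.
_s = 1.0 / (1.0 + numpy.exp(-0.0))
assert abs(_s * (1.0 - _s) - 0.25) < 1e-12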
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 704
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
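# Hedged worked example (added; written out inline so it does not depend on the
# obfuscated names above): the same row-by-row DP on a 3x3 grid. The cheapest
# top-left to bottom-right path through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is
# 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
_g = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
for _j in range(1, 3):
    _g[0][_j] += _g[0][_j - 1]
for _i in range(1, 3):
    _g[_i][0] += _g[_i - 1][0]
    for _j in range(1, 3):
        _g[_i][_j] += min(_g[_i - 1][_j], _g[_i][_j - 1])
assert _g[-1][-1] == 7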
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
@property
def snake_case ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self : str ):
lowerCamelCase :int = ort.SessionOptions()
lowerCamelCase :Dict = False
return options
def snake_case ( self : Dict ):
lowerCamelCase :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCamelCase :List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCamelCase :Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Optional[int] = '''A red cat sitting on a park bench'''
lowerCamelCase :Optional[int] = np.random.RandomState(0 )
lowerCamelCase :int = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :str = output.images
lowerCamelCase :Tuple = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase :Tuple = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self : List[Any] ):
lowerCamelCase :Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCamelCase :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCamelCase :int = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCamelCase :Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Any = '''A red cat sitting on a park bench'''
lowerCamelCase :Optional[int] = np.random.RandomState(0 )
lowerCamelCase :str = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :str = output.images
lowerCamelCase :Any = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase :Optional[Any] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 705
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
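# Hedged aside (added): every prime p > 3 satisfies p % 6 in {1, 5}, since the
# remaining residues are divisible by 2 or 3; that is why the loop above only
# tests i and i + 2 while stepping by 6.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))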
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
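# Hedged aside (added): the inner range above visits the three non-square
# corners of each new spiral layer; for j = 3 it yields 13, 17 and 21 (the 5x5
# ring) and deliberately stops before the perfect square 25, which cannot be
# prime.
assert list(range(3 * 3 + 3 + 1, (3 + 2) * (3 + 2), 3 + 1)) == [13, 17, 21]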
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
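# Hedged aside (added): _LazyModule defers the heavy torch/tf imports declared
# in _import_structure until an attribute is first accessed, so importing this
# package stays cheap; the TYPE_CHECKING branch gives static type checkers the
# real symbols without paying that cost at run time.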
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
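# A minimal usage sketch, not part of the test suite above: TextIteratorStreamer is
# consumed from the main thread while `generate` runs in a worker thread. The "gpt2"
# checkpoint and the prompt text are arbitrary choices for illustration.
def _streamer_usage_sketch():
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer("Streaming is", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    for chunk in streamer:  # yields decoded text pieces as they become available
        print(chunk, end="")
    thread.join()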
| 49
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
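# A quick illustration, not part of the original module: the attribute_map above lets
# generic PretrainedConfig names resolve to the GPT-specific ones, so `hidden_size`
# reads through to `n_embd`. The demo values are arbitrary.
if __name__ == "__main__":
    demo_config = OpenAIGPTConfig(n_embd=512, n_layer=6, n_head=8)
    assert demo_config.hidden_size == 512  # resolved through attribute_map
    assert demo_config.num_hidden_layers == 6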
| 707
|
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
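# Worked examples for the Liouville lambda function above, assuming prime_factors
# returns the prime factors with multiplicity:
#   liouville_lambda(10) -> prime_factors(10) == [2, 5]     (even count) -> 1
#   liouville_lambda(8)  -> prime_factors(8)  == [2, 2, 2]  (odd count)  -> -1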
| 49
| 0
|
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
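# Worked example of the parametrization above: (m, n) = (2, 1) gives the primitive
# perimeter 2 * 2 * (2 + 1) = 12, i.e. the (3, 4, 5) triangle; every multiple of 12
# up to the limit is then counted once for that triple.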
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
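# A small self-check sketch, not part of the conversion script: the fused timm qkv
# matrix has shape (3 * hidden_size, hidden_size), and the three slices above recover
# query, key and value in that order.
def _qkv_split_sketch(hidden_size=4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)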
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
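    # Example invocation (the paths are hypothetical and the script name assumes this
    # file is saved as convert_vit_hybrid_timm_to_pytorch.py):
    #   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
    #       --pytorch_dump_folder_path ./vit-hybrid-base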
| 49
| 0
|
_UpperCamelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 709
|
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
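# Worked example: for n = 100 the generated sequence is 0, 1, 1, 2, 3, 5, 8, 13, 21,
# 34, 55, 89 and the even terms 0 + 2 + 8 + 34 sum to 44, so solution(100) == 44.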
| 49
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest

from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # The following common tests do not apply to a vocabulary-free byte-level tokenizer
    # and are therefore skipped
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
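# A minimal illustration of the byte-level scheme exercised above (assuming the
# "google/byt5-small" checkpoint): each UTF-8 byte maps to byte_value + 3, because
# IDs 0-2 are reserved for the pad/eos/unk special tokens.
#   tok = ByT5Tokenizer.from_pretrained("google/byt5-small")
#   tok("hi")["input_ids"]  ->  [107, 108, 1]   # ord("h") + 3, ord("i") + 3, then eos (1)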
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
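# Usage sketch (assuming this file is the nllb_moe package's __init__.py): importing
# the config is cheap, while torch-backed classes are only materialized on first access.
#   from transformers.models.nllb_moe import NllbMoeConfig   # no torch import yet
#   from transformers.models.nllb_moe import NllbMoeModel    # triggers the lazy module load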
| 49
| 0
|