code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[str] = GPTSanJapaneseTokenizer
A : Optional[Any] = False
A : List[Any] = {'''do_clean_text''': False, '''add_prefix_space''': False}
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE : Optional[Any] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file, 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.get_input_output_texts(A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(A, clean_up_tokenization_spaces=A )
return text, ids
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE : int = 'こんにちは、世界。 こんばんは、㔺界。'
SCREAMING_SNAKE_CASE : str = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(A )
self.assertListEqual(A, A )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A, A )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE : Union[str, Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE : int = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
SCREAMING_SNAKE_CASE : List[str] = 'こんにちは、、、、世界。こんばんは、、、、世界。'
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(A )
self.assertEqual(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
SCREAMING_SNAKE_CASE : List[Any] = 'こんにちは、世界。'
SCREAMING_SNAKE_CASE : Dict = 'こんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE : List[Any] = 'こんにちは、世界。こんばんは、世界。😀'
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(prefix_text + input_text )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode('', prefix_text=prefix_text + input_text )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(A, prefix_text=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(A )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(A )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(A )
self.assertEqual(A, A )
self.assertEqual(A, A )
self.assertEqual(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
SCREAMING_SNAKE_CASE : Optional[Any] = 'こんにちは、世界。'
SCREAMING_SNAKE_CASE : Dict = 'こんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE : str = len(tokenizer.encode(A ) ) - 2
SCREAMING_SNAKE_CASE : Optional[Any] = len(tokenizer.encode(A ) ) - 2
SCREAMING_SNAKE_CASE : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0]
SCREAMING_SNAKE_CASE : List[str] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE : Tuple = tokenizer('', prefix_text=prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE : List[str] = tokenizer(A, prefix_text=A ).token_type_ids
self.assertListEqual(A, A )
self.assertListEqual(A, A )
self.assertListEqual(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
SCREAMING_SNAKE_CASE : str = tokenizer.encode('あンいワ' )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode('', prefix_text='あンいワ' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode('いワ', prefix_text='あン' )
self.assertEqual(tokenizer.decode(A ), tokenizer.decode(A ) )
self.assertEqual(tokenizer.decode(A ), tokenizer.decode(A ) )
self.assertNotEqual(A, A )
self.assertNotEqual(A, A )
self.assertEqual(x_token_a[1], x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1], x_token_a[3] ) # SEG token
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
SCREAMING_SNAKE_CASE : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
SCREAMING_SNAKE_CASE : Any = tokenizer(A, padding=A )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_encode_plus(A, padding=A )
# fmt: off
SCREAMING_SNAKE_CASE : int = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
SCREAMING_SNAKE_CASE : str = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
SCREAMING_SNAKE_CASE : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids, A )
self.assertListEqual(x_token.token_type_ids, A )
self.assertListEqual(x_token.attention_mask, A )
self.assertListEqual(x_token_a.input_ids, A )
self.assertListEqual(x_token_a.token_type_ids, A )
self.assertListEqual(x_token_a.attention_mask, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
| 28 |
'''simple docstring'''
import math
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE,_SCREAMING_SNAKE_CASE ):
__A= f"""Input value of [number={number}] must be an integer"""
raise TypeError(_SCREAMING_SNAKE_CASE )
if number < 1:
__A= f"""Input value of [number={number}] must be > 0"""
raise ValueError(_SCREAMING_SNAKE_CASE )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__A= int(math.log(number // 3,2 ) ) + 2
__A= [3, 5]
__A= 2
__A= 3
for block in range(1,_SCREAMING_SNAKE_CASE ):
for _ in range(_SCREAMING_SNAKE_CASE ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
UpperCAmelCase__ = 0
try:
UpperCAmelCase__ = proth(number)
except ValueError:
print(F"""ValueError: there is no {number}th Proth number""")
continue
print(F"""The {number}th Proth number: {value}""")
| 186 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __magic_name__ ( SCREAMING_SNAKE_CASE_):
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
UpperCamelCase__ : Union[str, Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCamelCase__ : Optional[int] = bertabert.config.encoder.vocab_size
UpperCamelCase__ : str = tokenizer.sep_token_id
UpperCamelCase__ : Any = tokenizer.cls_token_id
UpperCamelCase__ : Optional[int] = 128
UpperCamelCase__ : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
UpperCamelCase__ : Any = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
UpperCamelCase__ : Tuple = train_dataset.select(range(32 ) )
UpperCamelCase__ : Any = val_dataset.select(range(16 ) )
UpperCamelCase__ : List[Any] = 4
def _map_to_encoder_decoder_inputs(lowerCamelCase__ : str ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase__ : Dict = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=UpperCamelCase__ , max_length=512 )
UpperCamelCase__ : Any = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=UpperCamelCase__ , max_length=128 )
UpperCamelCase__ : Optional[Any] = inputs.input_ids
UpperCamelCase__ : List[Any] = inputs.attention_mask
UpperCamelCase__ : Optional[Any] = outputs.input_ids
UpperCamelCase__ : List[Any] = outputs.input_ids.copy()
UpperCamelCase__ : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
UpperCamelCase__ : List[Any] = outputs.attention_mask
assert all(len(UpperCamelCase__ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCamelCase__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCamelCase__ : Union[str, Any] ):
UpperCamelCase__ : int = pred.label_ids
UpperCamelCase__ : Dict = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase__ : List[str] = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase__ : Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase__ ) )] ) / len(UpperCamelCase__ )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase__ : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase__ , batch_size=UpperCamelCase__ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
UpperCamelCase__ : Optional[int] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase__ , batch_size=UpperCamelCase__ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
UpperCamelCase__ : Any = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Optional[Any] = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase__ , per_device_train_batch_size=UpperCamelCase__ , per_device_eval_batch_size=UpperCamelCase__ , predict_with_generate=UpperCamelCase__ , evaluation_strategy='''steps''' , do_train=UpperCamelCase__ , do_eval=UpperCamelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase__ : List[str] = SeqaSeqTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , tokenizer=UpperCamelCase__ , )
# start training
trainer.train()
| 711 |
import math
import os
import sys
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Any = ''''''
try:
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as binary_file:
UpperCamelCase__ : int = binary_file.read()
for dat in data:
UpperCamelCase__ : Dict = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _a ( SCREAMING_SNAKE_CASE : dict[str, str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lexicon.pop(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = last_match_id
if math.loga(SCREAMING_SNAKE_CASE ).is_integer():
for curr_key in lexicon:
UpperCamelCase__ : Any = '''0''' + lexicon[curr_key]
UpperCamelCase__ : Dict = bin(SCREAMING_SNAKE_CASE )[2:]
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Any = {'''0''': '''0''', '''1''': '''1'''}
UpperCamelCase__ , UpperCamelCase__ : Any = '''''', ''''''
UpperCamelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ : List[str] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
index += 1
UpperCamelCase__ : Dict = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCamelCase__ : Union[str, Any] = lexicon[curr_string]
result += last_match_id
return result
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : int = os.path.getsize(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = bin(SCREAMING_SNAKE_CASE )[2:]
UpperCamelCase__ : Tuple = len(SCREAMING_SNAKE_CASE )
return "0" * (length_length - 1) + file_length_binary + compressed
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Dict = 8
try:
with open(SCREAMING_SNAKE_CASE , '''wb''' ) as opened_file:
UpperCamelCase__ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = read_file_binary(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = compress_data(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = add_file_length(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
write_file_binary(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 106 | 0 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """bertabs"""
def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
super().__init__(**__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
| 78 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
class __A ( UpperCamelCase__ ):
a__ : str = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 78 | 1 |
_lowerCamelCase : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def _a ( ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = input("Enter message: " )
SCREAMING_SNAKE_CASE__ : Any = input("Enter key [alphanumeric]: " )
SCREAMING_SNAKE_CASE__ : List[str] = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
SCREAMING_SNAKE_CASE__ : int = "encrypt"
SCREAMING_SNAKE_CASE__ : Any = encrypt_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif mode.lower().startswith("d" ):
SCREAMING_SNAKE_CASE__ : List[str] = "decrypt"
SCREAMING_SNAKE_CASE__ : int = decrypt_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'''\n{mode.title()}ed message:''' )
print(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> str:
'''simple docstring'''
return translate_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encrypt" )
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> str:
'''simple docstring'''
return translate_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decrypt" )
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = key.upper()
for symbol in message:
SCREAMING_SNAKE_CASE__ : Dict = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(SCREAMING_SNAKE_CASE__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = 0
else:
translated.append(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 157 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _a ( SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
def wrapper(*SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE__ : List[str] = timeit.default_timer()
SCREAMING_SNAKE_CASE__ : int = func(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = timeit.default_timer() - starttime
return delta
SCREAMING_SNAKE_CASE__ : Optional[Any] = func.__name__
return wrapper
def _a ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=None ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_shapes or {}
for i in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(SCREAMING_SNAKE_CASE__ , _ArrayXD ):
SCREAMING_SNAKE_CASE__ : Tuple = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(SCREAMING_SNAKE_CASE__ , datasets.Value ):
if v.dtype == "string":
SCREAMING_SNAKE_CASE__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
SCREAMING_SNAKE_CASE__ : List[Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(SCREAMING_SNAKE_CASE__ , datasets.Sequence ):
while isinstance(SCREAMING_SNAKE_CASE__ , datasets.Sequence ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = v.feature
SCREAMING_SNAKE_CASE__ : Dict = seq_shapes[k]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(*SCREAMING_SNAKE_CASE__ ).astype(v.dtype )
SCREAMING_SNAKE_CASE__ : Any = data
dummy_data.append((i, example) )
return dummy_data
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=1_00 , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = generate_examples(SCREAMING_SNAKE_CASE__ , num_examples=SCREAMING_SNAKE_CASE__ , seq_shapes=SCREAMING_SNAKE_CASE__ )
with ArrowWriter(features=SCREAMING_SNAKE_CASE__ , path=SCREAMING_SNAKE_CASE__ ) as writer:
for key, record in dummy_data:
SCREAMING_SNAKE_CASE__ : int = features.encode_example(SCREAMING_SNAKE_CASE__ )
writer.write(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.Dataset.from_file(filename=SCREAMING_SNAKE_CASE__ , info=datasets.DatasetInfo(features=SCREAMING_SNAKE_CASE__ ) )
return dataset
| 157 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCamelCase ( ) -> Any:
raise RuntimeError("""CUDA out of memory.""" )
class UpperCamelCase (nn.Module ):
def __init__( self :Tuple ) ->Optional[int]:
super().__init__()
lowercase : str = nn.Linear(3 , 4 )
lowercase : Union[str, Any] = nn.BatchNormad(4 )
lowercase : Any = nn.Linear(4 , 5 )
def __snake_case ( self :Optional[int] , __magic_name__ :str ) ->Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__lowerCAmelCase ) ) )
class UpperCamelCase (unittest.TestCase ):
def __snake_case ( self :Optional[Any] ) ->Tuple:
lowercase : List[str] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__magic_name__ :Any ):
nonlocal batch_sizes
batch_sizes.append(__lowerCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCAmelCase , [128, 64, 32, 16, 8] )
def __snake_case ( self :str ) ->Dict:
lowercase : Optional[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__magic_name__ :Any , __magic_name__ :List[str] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowercase : str = mock_training_loop_function("""hello""" )
self.assertListEqual(__lowerCAmelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def __snake_case ( self :Union[str, Any] ) ->Any:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__magic_name__ :Optional[int] ):
pass
with self.assertRaises(__lowerCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def __snake_case ( self :List[str] ) ->List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__magic_name__ :Optional[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def __snake_case ( self :List[Any] ) ->Optional[Any]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :Dict ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__lowerCAmelCase ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def __snake_case ( self :Any ) ->int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__magic_name__ :Union[str, Any] ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(__lowerCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def test_release_memory(self):
    """release_memory() should return CUDA allocation to its pre-model level."""
    starting_memory = torch.cuda.memory_allocated()
    model = ModelForTest()
    model.cuda()
    self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
    # release_memory clears the reference(s) it is handed and empties the CUDA cache
    model = release_memory(model)
    self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 264 |
"""Lazy-import initialisation for the CLAP model family (transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map submodule -> public names it provides; consumed by _LazyModule below.
# (The original bound this dict, the torch list and the feature-extractor list to
# one throwaway name, overwriting each other, and `_import_structure` was never
# defined when _LazyModule ran -- NameError. Fixed by registering keys instead.)
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch-only symbols
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 208 | 0 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with bucket sort.

    One bucket per integer offset from the minimum; each bucket is sorted with
    the built-in (Timsort) and the buckets are concatenated in order.

    (The def was renamed to `__lowerCAmelCase` while the __main__ asserts still
    call `bucket_sort`, and the min/max tuple-unpack had collapsed onto one name.)
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for value in my_list:
        buckets[int(value - min_value)].append(value)
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 709 |
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with `num_rows` rows, centred with spaces.

    (Restored name -- the obfuscated file gave every def the same identifier.)
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # left padding so the triangle is centred
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # row values, space-separated except after the last element
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row.

    Raises:
        TypeError: if `num_rows` is not an int.
        ValueError: if `num_rows` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Return row `current_row_idx`, filled from the previous row of `triangle`."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior cell as the sum of the two cells above it (in place)."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle computing only the distinct left half of each row
    and mirroring it (rows are palindromic).

    Raises:
        TypeError: if `num_rows` is not an int.
        ValueError: if `num_rows` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # pad the previous row so each new element is a sum of two neighbours
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Time both triangle generators for inputs 0..14 and print the results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit re-imports __main__, so the call is spelled as a string
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Element-wise zero-mean Gaussian density with the given variance."""
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of side `kernel_size` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Build a spatial Gaussian kernel from each cell's distance to the centre."""
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving smoothing: each output pixel is a weighted average of its
    window, weighted by spatial distance AND intensity difference.

    Border pixels (within kernel_size // 2 of the edge) are left at 0.
    """
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # intensity differences relative to the window centre
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            imga[i, j] = np.sum(vals) / np.sum(weights)
    return imga
def parse_args(args: list) -> tuple:
    """Parse positional CLI args: filename, spatial variance, intensity variance,
    kernel size. Missing arguments fall back to defaults; the kernel size is
    bumped to the next odd number so the window has a centre pixel.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # force odd: even sizes gain 1, odd sizes are unchanged
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # NOTE(review): `cva` is presumably the OpenCV module (the top-of-file
    # `import cva` looks like a mangled `import cv2`) -- confirm before running.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)  # read as grayscale
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)  # was the non-existent `np.uinta`
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a(SchedulerMixin, ConfigMixin):
    """Fourth-order improved pseudo linear multistep (iPNDM) scheduler.

    NOTE(review): the class bases were the undefined name `lowercase__`;
    SchedulerMixin and ConfigMixin are what this file imports, so they are
    restored here. All `self.` assignments and method names had been mangled
    away (every method shared one identifier), breaking the scheduler API.
    """

    # Pipelines query `scheduler.order`; iPNDM exposes a first-order step interface.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000,
                 trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values (previous model outputs)
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Precompute betas/alphas/timesteps for a run of `num_inference_steps` steps."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        # `torch.atana` was a mangled `torch.atan2`
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """One reverse-diffusion step, blending up to the last four model outputs
        with the Adams-Bashforth coefficients (orders 1..4)."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # iPNDM needs no input scaling; kept for scheduler-API compatibility.
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Project the sample to the previous timestep using the blended output."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # 1e-8 guards the division when alpha approaches zero
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# module-level logger (was bound to a mangled throwaway name)
logger = logging.get_logger(__name__)
def get_config(model_name: str):
    """Build a BitConfig with ImageNet-1k labels for the given timm model name."""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name: str) -> str:
    """Map a timm BiT state-dict key to the corresponding HF Bit key."""
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # everything else lives under the encoder
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO cats test image used for conversion sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm BiT checkpoint into the HF Bit format, verify it against the
    timm model on a test image, and optionally save/push the result."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remapping keys to HF names
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # (the remapped re-insertion had been lost in the mangled version)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model {model_name} and processor to the hub""")
        model.push_to_hub(f"""ybelkada/{model_name}""")
        processor.push_to_hub(f"""ybelkada/{model_name}""")
if __name__ == "__main__":
    # (the parser was bound to a mangled name while `parser.add_argument` was
    # called on the real name -- NameError fixed)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='resnetv2_50x1_bitm',
        type=str,
        help='Name of the BiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model to the hub.',
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 718 |
"""Lazy-import initialisation for the BLIP model family (transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Map submodule -> public names it provides; consumed by _LazyModule below.
# (The original overwrote a single throwaway name with each list and then
# referenced the never-defined `_import_structure` -- NameError fixed.)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 213 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: DownBlock2D output slice. (All classes were named `lowercase`,
    so only the last survived; bases restored to the imported UNetBlockTesterMixin.
    Attribute/method names follow the mixin contract -- TODO confirm against mixin.)"""

    block_class = DownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        # was: slice bound to one mangled name, undefined `a_` passed on
        expected_slice = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
        super().test_output(expected_slice)
class ResnetDownsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: ResnetDownsampleBlock2D output slice (names restored)."""

    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
        super().test_output(expected_slice)
class AttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: AttnDownBlock2D output slice (names restored)."""

    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
        super().test_output(expected_slice)
class CrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: CrossAttnDownBlock2D output slice (names restored)."""

    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # the bare `= 32` had lost its target -- TODO confirm key against the mixin
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
        super().test_output(expected_slice)
class SimpleCrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: SimpleCrossAttnDownBlock2D output slice (names restored)."""

    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # the boolean kwarg value had been replaced by the undefined `a_`
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
        super().test_output(expected_slice)
class SkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: SkipDownBlock2D output slice (names restored)."""

    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
        super().test_output(expected_slice)
class AttnSkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: AttnSkipDownBlock2D output slice (names restored)."""

    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
        super().test_output(expected_slice)
class DownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: DownEncoderBlock2D output slice (names restored)."""

    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # encoder blocks take no time embedding -- TODO confirm flag value
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
        super().test_output(expected_slice)
class AttnDownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: AttnDownEncoderBlock2D output slice (names restored)."""

    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
        super().test_output(expected_slice)
class UNetMidBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: UNetMidBlock2D output slice (names restored)."""

    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
        super().test_output(expected_slice)
class UNetMidBlockaDCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: UNetMidBlock2DCrossAttn output slice (names restored)."""

    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
        super().test_output(expected_slice)
class UNetMidBlockaDSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: UNetMidBlock2DSimpleCrossAttn output slice (names restored)."""

    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
        super().test_output(expected_slice)
class UpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: UpBlock2D output slice (names restored)."""

    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
        super().test_output(expected_slice)
class ResnetUpsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: ResnetUpsampleBlock2D output slice (names restored)."""

    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
        super().test_output(expected_slice)
class CrossAttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: CrossAttnUpBlock2D output slice (names restored)."""

    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: SimpleCrossAttnUpBlock2D output slice (names restored)."""

    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
        super().test_output(expected_slice)
class AttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: AttnUpBlock2D output slice (names restored)."""

    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
        super().test_output(expected_slice)
class SkipUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: SkipUpBlock2D output slice (names restored)."""

    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
        super().test_output(expected_slice)
class AttnSkipUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: AttnSkipUpBlock2D output slice (names restored)."""

    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
        super().test_output(expected_slice)
class UpDecoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: UpDecoderBlock2D output slice (names restored)."""

    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # decoder blocks take no time embedding -- TODO confirm flag value
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
        super().test_output(expected_slice)
class AttnUpDecoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test: AttnUpDecoderBlock2D output slice (names restored)."""

    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
        super().test_output(expected_slice)
| 165 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _UpperCAmelCase ( __A : list , __A : list , __A : list , __A : list , __A : list ):
a_ : List[Any] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__A )] )
a_ : int = np.array(__A )
a_ : str = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __A ) ) , x.transpose() ) , __A )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """One-step SARIMAX forecast of user count with match count as the exogenous
    regressor (weekly seasonality, restored name per the call site)."""
    # suppressing the convergence warnings is intentionally NOT done here
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR forecast of the next user count (restored name per the call site)."""
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return the lower safety limit derived from the interquartile range.

    The original collapsed q1 and q3 into a single name, making the IQR
    always zero; distinct quartile variables are restored.

    >>> interquartile_range_checker([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    2.8
    """
    train_user.sort()  # NOTE: sorts the caller's list in place (upstream behavior)
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether today's actual value is 'safe' given the forecasts.

    A vote above the actual value counts as not-safe; a vote within 0.1 (by
    absolute magnitude) counts as safe; anything else is not-safe. The
    original discarded the `not_safe` increment by assigning it to a
    throwaway name — that tally is restored.
    """
    safe = 0
    not_safe = 0
    for vote in list_vote:
        if vote > actual_result:
            not_safe += 1
        elif abs(abs(vote) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    """
    Forecast total users of tomorrow from a small daily dataset and report
    whether today's data looks safe via a three-model vote.
    """
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )

    # Scale the columns so the three models see comparable ranges.
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    # f-prefix restored — the original printed the literal "{not_str}".
    print(f"Today's data is {not_str}safe.")
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect (row_id, row_dict) pairs from *df* in the given partition order.

    Row ids follow the SparkExamplesIterable convention "{partition}_{row_index}".
    Renamed from the mangled `_UpperCAmelCase` to the name every test below calls.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """Repartitioning should split 100 8-byte rows into 50 shards of 16 bytes."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """Examples must be generated in the requested (reversed) partition order."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """A single-partition dataframe yields one shard with sequential row ids."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """Shuffling with a mocked generator must yield partitions in reversed order."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """Sharding across 2 workers must split 4 partitions into (0, 2) and (1, 3)."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    """Even with a tiny shard size, partitions must not exceed the row count."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
from jiwer import compute_measures
import datasets
a ="""\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
a ="""\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
a ="""
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_(datasets.Metric):
    """Word error rate (WER) metric backed by jiwer.

    The original defined both methods under one mangled name `lowerCAmelCase`,
    so the second shadowed the first and the `datasets.Metric` hook names were
    lost; the framework hooks `_info` / `_compute` are restored.
    """

    def _info(self):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the WER of *predictions* against *references*.

        With concatenate_texts=True a single jiwer call scores everything at
        once; otherwise error counts are accumulated pair by pair.
        """
        if concatenate_texts:
            # jiwer's compute_measures takes (truth, hypothesis).
            return compute_measures(references, predictions)["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 652 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Return the data of *input_array* laid out as an (n, 1) column vector.

    Renamed from the mangled `SCREAMING_SNAKE_CASE__` to the name the
    covariance helpers in this module call.
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class covariance: summed per-class scatter, averaged over samples.

    *features* is (n_features, n_samples); *labels* assigns each sample
    (column) to a class id in range(classes). The original's accumulator was
    mangled so `covariance_sum` was never defined — it is restored.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - data_mean.reshape(-1, 1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class covariance: class-size-weighted scatter of class means
    around the global mean, averaged over the number of samples.

    The original's accumulator was mangled so `covariance_sum` was never
    defined; distinct names are restored.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        mean_diff = (data_mean - general_data_mean).reshape(-1, 1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(mean_diff, mean_diff.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(mean_diff, mean_diff.T)
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* (n_features x n_samples) onto its top *dimensions*
    principal axes and return the (dimensions x n_samples) projection.

    Raises AssertionError (after logging) when *features* contains no nonzero
    entry. The original unpacked `np.linalg.eigh` into a single mangled name,
    losing the eigenvectors — the unpack is restored.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto *dimensions* discriminant axes via generalized
    eigendecomposition of the between/within class covariances.

    Requires classes > dimensions (AssertionError otherwise) and a non-empty
    dataset. Fixes from the original: `features.any` was missing its call
    parentheses (a bound method is always truthy, so the empty-dataset branch
    was unreachable), and the eigh/svd unpacks bound everything to one name.
    """
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _eigenvalues, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _s, _v = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA smoke test: expects an AssertionError from the inner comparison."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest

    doctest.testmod()
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the CLAP model package. The original bound every
# assignment to the single name `SCREAMING_SNAKE_CASE`, so `_import_structure`
# was never defined and the `sys.modules` swap at the bottom was lost.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure for the CLAP model package. Corrupted table residue
# ("| 704 |") was fused onto the first line and every assignment was bound to
# the single name `SCREAMING_SNAKE_CASE`; both defects are repaired here.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138 | 0 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that consumes each undirected edge once.

    Returns the list of vertices in visit order. The original declared four
    parameters with the same mangled name (a SyntaxError) and discarded the
    edge-marking assignment; both are restored. The recursive call site
    already used the name `dfs`.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions — the graph is undirected.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify *graph* by odd-degree vertex count.

    Returns (status, odd_node): status 1 = Euler circuit (no odd vertices),
    2 = Euler path (exactly two odd vertices, odd_node is one of them),
    3 = neither. Renamed from the mangled `__magic_name__` (duplicate
    parameters) to the name `check_euler` calls.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Print whether *graph* has an Euler cycle/path and, if so, one traversal.

    Starts the DFS at vertex 1 for a cycle, or at an odd-degree vertex for a
    path. Renamed from the mangled `__magic_name__` (duplicate parameters) to
    the name `main` calls.
    """
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """Run the Euler check on five sample graphs.

    The original bound all five graphs and the node bound to one mangled
    name, so only the last assignment survived; distinct names are restored.
    """
    graph_1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    graph_2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    graph_3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    graph_4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    graph_5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(graph_1, max_node)
    check_euler(graph_2, max_node)
    check_euler(graph_3, max_node)
    check_euler(graph_4, max_node)
    check_euler(graph_5, max_node)


if __name__ == "__main__":
    main()
| 535 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and pretrained-config archive map. The original bound both to
# the same mangled name, so the logger was immediately shadowed by the dict.
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """nvidia/segformer-b0-finetuned-ade-512-512""": (
        """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowercase(PretrainedConfig):
    """Configuration for a SegFormer model.

    The original declared every `__init__` parameter under one mangled name
    (duplicate parameters — a SyntaxError) and inherited from the undefined
    `lowercase__`; parameter names are restored from the attribute
    assignments, and the base is the imported `PretrainedConfig`.
    """

    model_type = '''segformer'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Defaults to True for backward compatibility with older checkpoints.
        self.reshape_last_stage = kwargs.get('''reshape_last_stage''', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class lowercase(OnnxConfig):
    """ONNX export configuration for SegFormer.

    The original defined its three properties under one mangled name
    `UpperCAmelCase` (only the last survived) and inherited from the
    undefined `lowercase__`; the OnnxConfig hook names are restored.
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input spec: a single NCHW pixel_values tensor with dynamic axes."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting the ops this model needs."""
        return 12
| 535 | 1 |
"""simple docstring"""
def __a ( A ) -> int:
'''simple docstring'''
if not isinstance(A , A ):
A__ = f"""Input value of [number={number}] must be an integer"""
raise TypeError(A )
if number < 1:
A__ = f"""Input value of [number={number}] must be > 0"""
raise ValueError(A )
A__ = 1
for i in range(1 , A ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
    # Run the module's doctests; corrupted table residue ("| 702 |") that was
    # fused onto the last line is removed.
    import doctest

    doctest.testmod()
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the latest *num_runs* scheduled runs of the daily CI workflow.

    Queries the GitHub Actions API for the `main` branch, `schedule` events
    only. Renamed from the mangled `__a` (duplicate parameters) to the name
    `get_last_daily_ci_runs` calls.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the run id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into
    *output_dir*. Artifacts not present in that run are silently skipped.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) matches the keyword expected by the
        # imported `get_artifacts_links` helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last daily CI artifacts and return their extracted text
    contents as {artifact_name: {member_filename: text}}.

    Fixes from the original: the per-artifact result dicts and file contents
    were assigned to a throwaway mangled name (so `results` stayed empty),
    and corrupted table residue fused onto the return line is removed.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Count token occurrences in a binarized dataset for MLM mask smoothing.
    # The original bound every value (logger, args, data, counter, counts)
    # to the single name `A_`, so `counter`, `counts` and the per-token
    # assignment `counts[k] = v` were all broken; real names are restored.
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=3_0_5_2_2, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 393 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A_ = "sshleifer/bart-tiny-random"
A_ = "patrickvonplaten/t5-tiny-random"
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return AutoConfig.from_pretrained(lowerCAmelCase_ )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=lowerCAmelCase_ , d=lowerCAmelCase_ )
| 393 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE(PipelineTool):
    """CLIPSeg-based tool that produces a segmentation mask for a text label.

    The original bound all six class attributes to one mangled name (only the
    last survived), declared `__init__` with duplicate `*`/`**` parameter
    names (a SyntaxError), and discarded the mask-threshold assignments in
    `decode`; the PipelineTool attribute/hook names are restored.
    """

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation

    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Preprocess the (image, label) pair into model tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='''pt''')

    def forward(self, inputs):
        """Run CLIPSeg without gradients and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits into a 0/255 PIL mask image."""
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_55).astype(np.uint8))
| 121 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy *weight* (and optional *bias*) into *torch_layer* after shape checks.

    The original assigned the new `nn.Parameter` objects to a throwaway
    mangled name instead of the layer attributes, so nothing was actually
    set; renamed from `__a` to the name every loader below calls.
    """
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load a Trax LSH self-attention layer's weights into the torch layer.

    *weights* holds (query_key, value, output dense) arrays; each is
    transposed/reshaped from Trax's (heads, ...) layout to torch's 2-D
    layout before being copied. Renamed from `__a` to the name the block
    loader calls.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (separate query, key, value, output dense)
    into a PyTorch Reformer local attention layer."""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention layer norm, attention,
    feed-forward layer norm, and feed-forward denses) into `torch_block`."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        # 3 tensors -> shared query/key (LSH attention); 4 tensors -> local attention
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax Reformer checkpoint tree into a ReformerModelWithLMHead.

    Layout assumed from the trax pickle (indices 1/3/5/7/9) — verify against the
    matching trax release if the checkpoint format changes.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # each HF layer consumes 4 consecutive trax weight groups
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from `config_file`, load the pickled trax
    weights at `trax_model_pkl_path`, and save the PyTorch state dict to
    `pytorch_dump_path`."""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        # NOTE: pickle.load on a checkpoint assumes the file is trusted.
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 121 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """T5-style spectrogram decoder with FiLM conditioning on the diffusion timestep."""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        # timestep embedding -> FiLM conditioning vector of size 4 * d_model
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        # absolute positions are fixed, not learned
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Outer product of query/key padding masks -> (batch, 1, q_len, k_len)."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """Decode continuous spectrogram frames conditioned on the noise timestep.

        `decoder_input_tokens` is (batch, seq, input_dims); `decoder_noise_time`
        is a (batch,) tensor of values in [0, 1).
        """
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            inputs = lyr(
                inputs,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        inputs = self.decoder_norm(inputs)
        inputs = self.post_dropout(inputs)
        spec_out = self.spec_out(inputs)
        return spec_out
class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder block: self-attention, cross-attention,
    then a FiLM-conditioned feed-forward layer."""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        """Returns a 1-tuple `(hidden_states,)` for parity with HF layer outputs."""
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )
        if encoder_hidden_states is not None:
            # turn the {0, 1} padding mask into additive attention bias (0 / -1e10)
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e1_0).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention with optional FiLM conditioning on the normed input."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
    ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        # residual connection
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention over encoder key/value states with residual add."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states=None,
        attention_mask=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            # drop the broadcast head dim added by encoder_decoder_mask
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward block with optional FiLM conditioning."""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        # residual connection
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5 gated-GELU feed-forward: GELU(wi_0(x)) * wi_1(x) -> dropout -> wo."""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        # gated activation
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: scale only, no mean subtraction, no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU as used in Google BERT / GPT (identical to
    the "gelu_new" activation in transformers)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM: project the conditioning embedding to (scale, shift) and apply
    `x * (1 + scale) + shift`."""

    def __init__(self, in_features, out_features):
        super().__init__()
        # single projection producing both scale and shift, hence out_features * 2
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 391 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the BlenderbotSmall model family: maps submodule
# name -> public names, populated per available backend (tokenizers/torch/tf/flax).
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 391 | 1 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """Builds small BioGpt configs/inputs and runs per-feature model checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check that caching with a partial attention mask matches a full pass."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check that multi-token decoding with cache matches a full pass."""
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        """c_proj weights should follow the scaled-init std used for residual projections."""
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + generation + pipeline test suite for BioGpt."""

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def a_ (self ) -> Dict:
__UpperCamelCase : Any = BioGptModelTester(self )
__UpperCamelCase : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase : List[str] = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a_ (self ) -> Any:
__UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_UpperCAmelCase )
def a_ (self ) -> Any:
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_UpperCAmelCase , gradient_checkpointing=_UpperCAmelCase )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_UpperCAmelCase )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_UpperCAmelCase )
    @slow
    def a_ (self ) -> Dict:
        # Integration test: batched generation with left padding should match
        # per-sentence (unbatched) generation on the same inputs.
        # NOTE(review): most intermediate results below are bound to a throwaway
        # local while later lines read names (``model``, ``tokenizer``,
        # ``sentences`` …) that are never bound — obfuscation damage; compare
        # with the original test before trusting the flow.
        __UpperCamelCase : Union[str, Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(_UpperCAmelCase )
        __UpperCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        __UpperCamelCase : int = "left"
        # Define PAD Token = EOS Token = 50256
        __UpperCamelCase : Dict = tokenizer.eos_token
        __UpperCamelCase : int = model.config.eos_token_id
        # use different length sentences to test batching
        __UpperCamelCase : int = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        __UpperCamelCase : List[str] = tokenizer(_UpperCAmelCase , return_tensors="pt" , padding=_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = inputs["input_ids"].to(_UpperCAmelCase )
        __UpperCamelCase : int = model.generate(
            input_ids=_UpperCAmelCase , attention_mask=inputs["attention_mask"].to(_UpperCAmelCase ) , )
        __UpperCamelCase : Optional[Any] = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = model.generate(input_ids=_UpperCAmelCase )
        # number of pad columns in the shorter sequence of the padded batch
        __UpperCamelCase : Dict = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        __UpperCamelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(_UpperCAmelCase )
        __UpperCamelCase : List[str] = model.generate(input_ids=_UpperCAmelCase , max_length=model.config.max_length - num_paddings )
        __UpperCamelCase : Tuple = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase )
        __UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase )
        __UpperCamelCase : List[str] = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence] )
    @slow
    def a_ (self ) -> Dict:
        # Loading each published checkpoint (only the first) must succeed.
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase : Optional[int] = BioGptModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
    def a_ (self ) -> Optional[Any]:
        # Single-label sequence classification: logits shape must be
        # (batch_size, num_labels).
        # NOTE(review): intermediate values are bound to a throwaway local and
        # later lines read unbound names (``input_dict``, ``input_ids``,
        # ``model``, ``result`` …) — obfuscation damage; verify.
        __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : Tuple = 3
        __UpperCamelCase : List[str] = input_dict["input_ids"]
        __UpperCamelCase : List[str] = input_ids.ne(1 ).to(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        __UpperCamelCase : List[str] = BioGptForSequenceClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def a_ (self ) -> Optional[Any]:
        # Multi-label sequence classification variant of the test above;
        # labels become a float tensor of shape (batch_size, num_labels).
        __UpperCamelCase , __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : int = 3
        __UpperCamelCase : int = "multi_label_classification"
        __UpperCamelCase : Optional[Any] = input_dict["input_ids"]
        __UpperCamelCase : List[str] = input_ids.ne(1 ).to(_UpperCAmelCase )
        __UpperCamelCase : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        __UpperCamelCase : Optional[int] = BioGptForSequenceClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class A ( unittest.TestCase ):
    """Slow integration tests for BioGPT that download the published checkpoint.

    NOTE(review): both methods are named ``a_`` (the second overwrites the
    first) and many lines bind a throwaway local while later lines read unbound
    names (``model``, ``output`` …) — obfuscation damage; verify against the
    original test file.
    """
    @slow
    def a_ (self ) -> str:
        # Forward pass on a fixed token sequence; check logits shape and a
        # 3x3 slice against reference values.
        __UpperCamelCase : List[str] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        __UpperCamelCase : Dict = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
        __UpperCamelCase : List[Any] = model(_UpperCAmelCase )[0]
        __UpperCamelCase : Tuple = 4_2_3_8_4
        __UpperCamelCase : List[Any] = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , _UpperCAmelCase )
        __UpperCamelCase : str = torch.tensor(
            [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
    @slow
    def a_ (self ) -> Any:
        # Seeded beam-search generation must reproduce the reference sentence.
        __UpperCamelCase : Any = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        __UpperCamelCase : Any = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(_UpperCAmelCase )
        torch.manual_seed(0 )
        __UpperCamelCase : List[str] = tokenizer("COVID-19 is" , return_tensors="pt" ).to(_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = model.generate(
            **_UpperCAmelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_UpperCAmelCase , )
        __UpperCamelCase : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCAmelCase )
        __UpperCamelCase : int = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
| 399 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowerCAmelCase ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line to a fixed length.

    Args:
        tokenizer: HF tokenizer; its ``padding_side`` attribute is set in place.
        line: raw text to encode.
        max_length: truncation / padding length.
        padding_side: "left" or "right", assigned onto the tokenizer.
        pad_to_max_length: pad to ``max_length`` when True, otherwise no padding.
        return_tensors: framework flag forwarded to the tokenizer ("pt" default).

    Returns:
        The tokenizer's BatchEncoding for the single-element batch ``[line]``.

    Fixes: the original signature repeated the same parameter name six times
    (a SyntaxError) and the body read names that were never bound.
    """
    # Byte-level BPE tokenizers (Bart) need an explicit leading-space flag,
    # but only when the line does not already start with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def __lowerCAmelCase ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop token columns that are padding across the whole batch.

    Args:
        input_ids: 2-D LongTensor of token ids (batch, seq).
        pad_token_id: id that marks padding.
        attention_mask: optional mask trimmed with the same column selection.

    Returns:
        Trimmed ``input_ids``, or a ``(input_ids, attention_mask)`` tuple when a
        mask is given.

    Fixes: the original signature repeated the same parameter name (a
    SyntaxError) and the body read the unbound name ``input_ids``.
    """
    # A column is kept iff any row holds a non-pad token in it.
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A ( SCREAMING_SNAKE_CASE__ ):
    """Line-by-line seq2seq dataset reading ``{type_path}.source`` / ``.target``.

    NOTE(review): the signatures below repeat the placeholder parameter name
    ``_UpperCAmelCase`` — duplicate argument names are a SyntaxError — and the
    bodies read names (``type_path``, ``n_obs``, ``batch`` …) that are never
    bound, while results are assigned to a throwaway local instead of ``self``
    attributes. The block is mechanically obfuscated; restore the original
    parameter/attribute names before use.
    """
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="train" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="" , ) -> List[str]:
        super().__init__()
        __UpperCamelCase : List[str] = Path(_UpperCAmelCase ).joinpath(type_path + ".source" )
        __UpperCamelCase : Dict = Path(_UpperCAmelCase ).joinpath(type_path + ".target" )
        __UpperCamelCase : int = self.get_char_lens(self.src_file )
        __UpperCamelCase : Optional[int] = max_source_length
        __UpperCamelCase : Optional[int] = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        __UpperCamelCase : str = tokenizer
        __UpperCamelCase : Optional[Any] = prefix
        if n_obs is not None:
            # keep only the first n_obs examples
            __UpperCamelCase : Tuple = self.src_lens[:n_obs]
        __UpperCamelCase : Optional[Any] = src_lang
        __UpperCamelCase : Any = tgt_lang
    def __len__(self ) -> List[Any]:
        # dataset size == number of source lines
        return len(self.src_lens )
    def __getitem__(self , _UpperCAmelCase ) -> Dict[str, torch.Tensor]:
        # Fetch one (source, target) pair and encode both sides.
        __UpperCamelCase : str = index + 1 # linecache starts at 1
        __UpperCamelCase : int = self.prefix + linecache.getline(str(self.src_file ) , _UpperCAmelCase ).rstrip("\n" )
        __UpperCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , _UpperCAmelCase ).rstrip("\n" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , _UpperCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers bundle separate question-encoder/generator tokenizers.
        __UpperCamelCase : int = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCAmelCase ) else self.tokenizer
        )
        __UpperCamelCase : str = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCAmelCase ) else self.tokenizer
        __UpperCamelCase : str = encode_line(_UpperCAmelCase , _UpperCAmelCase , self.max_source_length , "right" )
        __UpperCamelCase : List[Any] = encode_line(_UpperCAmelCase , _UpperCAmelCase , self.max_target_length , "right" )
        __UpperCamelCase : str = source_inputs["input_ids"].squeeze()
        __UpperCamelCase : List[Any] = target_inputs["input_ids"].squeeze()
        __UpperCamelCase : int = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def a_ (_UpperCAmelCase ) -> Optional[int]:
        # Per-line character lengths of a text file (used for filtering above).
        return [len(_UpperCAmelCase ) for x in Path(_UpperCAmelCase ).open().readlines()]
    def a_ (self , _UpperCAmelCase ) -> Dict[str, torch.Tensor]:
        # Collate function: stack examples and trim all-padding columns.
        __UpperCamelCase : str = torch.stack([x["input_ids"] for x in batch] )
        __UpperCamelCase : Union[str, Any] = torch.stack([x["attention_mask"] for x in batch] )
        __UpperCamelCase : Any = torch.stack([x["decoder_input_ids"] for x in batch] )
        __UpperCamelCase : List[str] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , _UpperCAmelCase )
            else self.tokenizer.pad_token_id
        )
        __UpperCamelCase : Optional[Any] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , _UpperCAmelCase )
            else self.tokenizer.pad_token_id
        )
        __UpperCamelCase : int = trim_batch(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase , __UpperCamelCase : Dict = trim_batch(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
_lowerCAmelCase = getLogger(__name__)
def __lowerCAmelCase ( snake_case__ ):
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``."""
    flattened = []
    for chunk in snake_case__:
        flattened.extend(chunk )
    return flattened
def __lowerCAmelCase ( snake_case__ ):
    """Capture the current git state and write it to ``<folder>/git_log.json``."""
    # NOTE(review): ``get_git_info`` / ``save_json`` are not defined under
    # those names in this file (the helpers were renamed by obfuscation), and
    # the captured info is bound to a throwaway local while the folder path is
    # passed as the JSON content — verify against the original.
    __UpperCamelCase : List[str] = get_git_info()
    save_json(snake_case__ , os.path.join(snake_case__ , "git_log.json" ) )
def __lowerCAmelCase ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize ``content`` as JSON to the file at ``path``.

    Fixes: the original signature repeated the same parameter name three times,
    which is a SyntaxError in Python.
    """
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def __lowerCAmelCase ( snake_case__ ):
    """Read the file at ``snake_case__`` and return the parsed JSON object."""
    with open(snake_case__ ) as handle:
        return json.load(handle )
def __lowerCAmelCase ( ):
    """Return a dict describing the current git repository.

    Keys: repo_id, repo_sha, repo_branch, hostname.

    Fixes: the original body read the unbound name ``snake_case__`` (the
    ``search_parent_directories`` flag and the repo object were garbled).
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def __lowerCAmelCase ( fn , iterable ):
    """Eager ``map``: apply ``fn`` over ``iterable`` and return a list.

    Fixes: the original signature repeated the same parameter name (SyntaxError).
    """
    return list(map(fn , iterable ) )
def __lowerCAmelCase ( obj , path ):
    """Pickle ``obj`` to the file at ``path``.

    Fixes: the original signature repeated the same parameter name (SyntaxError).
    """
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def __lowerCAmelCase ( snake_case__ ):
    """SQuAD-style answer normalization.

    Lower-cases, strips punctuation and the articles a/an/the, and collapses
    whitespace.

    Fixes: the inner helpers read the unbound name ``text`` instead of their
    own parameter.
    """
    def remove_articles(text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(snake_case__ ) ) ) )
def __lowerCAmelCase ( pred , gold ):
    """Token-level F1 between two answer strings (SQuAD-style).

    Returns 0 when there is no token overlap.

    Fixes: the original signature repeated the same parameter name (SyntaxError).
    """
    # NOTE(review): ``normalize_answer`` is defined in this file only under the
    # obfuscated name ``__lowerCAmelCase`` — restore the helper name before use.
    pred_toks = normalize_answer(pred ).split()
    gold_toks = normalize_answer(gold ).split()
    common = Counter(pred_toks ) & Counter(gold_toks )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    return (2 * precision * recall) / (precision + recall)
def __lowerCAmelCase ( pred , gold ):
    """Exact-match after SQuAD-style normalization.

    Fixes: the original signature repeated the same parameter name (SyntaxError).
    """
    # NOTE(review): ``normalize_answer`` exists here only under an obfuscated
    # name — restore the helper name before use.
    return normalize_answer(pred ) == normalize_answer(gold )
def __lowerCAmelCase ( output_lns , reference_lns ):
    """Average exact-match over aligned prediction/reference lists.

    Returns ``{"em": fraction}``; 0 for empty input.

    Fixes: the original signature repeated the same parameter name (SyntaxError).
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    # NOTE(review): ``exact_match_score`` exists here only under an obfuscated
    # name — restore the helper name before use.
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def __lowerCAmelCase ( model_prefix ):
    """True when the model prefix denotes a RAG model.

    Fixes: the original body read the unbound name ``model_prefix`` while the
    parameter was called ``snake_case__``.
    """
    return model_prefix.startswith("rag" )
def __lowerCAmelCase ( extra_params , hparams , config ):
    """Move ``extra_params`` attributes from ``hparams`` onto ``config``.

    Params absent from the config (under either their own name or their T5
    equivalent) are dropped with an info message. Returns ``(hparams, config)``.

    Fixes: the original signature repeated the same parameter name (SyntaxError)
    and the garbled assignments never built the equivalence map.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                # module logger is bound to the obfuscated name at file top
                _lowerCAmelCase.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 399 | 1 |
'''simple docstring'''
def A_( A : int = 5000_0000):
    """Project Euler 87: count integers below ``A`` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.

    Fixes two bugs in the original:
      * ``ret.add(A)`` added the *limit* instead of the computed total, so at
        most one value was ever counted;
      * the prime set was iterated directly — the ``break`` pruning below is
        only valid in ascending order, which sets do not guarantee.
    """
    ret = set()
    # Largest prime whose square can appear: square <= limit - 2**3 - 2**4.
    prime_square_limit = int((A - 24) ** (1 / 2))
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    ordered_primes = sorted(primes)
    for prime_sq in ordered_primes:
        square = prime_sq * prime_sq
        for prime_cb in ordered_primes:
            cube = prime_cb ** 3
            # smallest possible fourth power is 2**4 == 16
            if square + cube >= A - 16:
                break
            for prime_tt in ordered_primes:
                total = square + cube + prime_tt ** 4
                if total >= A:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
    print(f"""{A_() = }""")
| 3 |
"""simple docstring"""
def A_ ( snake_case__ , snake_case__ = " " ) -> list:
_UpperCamelCase :List[str] = []
_UpperCamelCase :int = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
_UpperCamelCase :Dict = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# NOTE(review): every module constant below is assigned to the same obfuscated
# name ``snake_case_`` — each assignment clobbers the previous one, and the
# names the class below reads (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# SPIECE_UNDERLINE …) are never bound. Restore the original constant names.
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # slow tokenizer is unavailable without sentencepiece
    snake_case_ : List[Any] = None
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ : List[Any] = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
snake_case_ : Dict = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}
snake_case_ : Any = '▁'
class lowercase__ ( snake_case_ ):
    """Fast FNet tokenizer backed by a sentencepiece vocabulary.

    NOTE(review): every method signature repeats the placeholder parameter
    name ``lowerCamelCase__`` — duplicate argument names are a SyntaxError —
    and the bodies read names (``mask_token``, ``token_ids_a``,
    ``save_directory`` …) that are never bound. Restore the original
    parameter names before use.
    """
    _snake_case = VOCAB_FILES_NAMES
    _snake_case = PRETRAINED_VOCAB_FILES_MAP
    _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _snake_case = ['''input_ids''', '''token_type_ids''']
    _snake_case = FNetTokenizer
    def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="<unk>" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<pad>" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , **lowerCamelCase__ , ):
        """Build the fast tokenizer; the mask token keeps leading whitespace."""
        UpperCamelCase = (
            AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ , normalized=lowerCamelCase__ )
            if isinstance(lowerCamelCase__ , lowerCamelCase__ )
            else mask_token
        )
        super().__init__(
            lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
        UpperCamelCase = do_lower_case
        UpperCamelCase = remove_space
        UpperCamelCase = keep_accents
        UpperCamelCase = vocab_file
        UpperCamelCase = False if not self.vocab_file else True
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
        """Token-type ids: 0 for the first sequence, 1 for the second."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not os.path.isdir(lowerCamelCase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCamelCase = os.path.join(
            lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
            copyfile(self.vocab_file , lowerCamelCase__ )
        return (out_vocab_file,)
| 721 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __snake_case ( ):
    """Build and run the CLI parser for the TPU launch helper.

    Returns the parsed argument namespace (num_cores, training_script,
    training_script_args).

    Fixes: the original bound the parser to a throwaway local (so the
    ``parser.add_argument`` calls raised NameError) and passed the undefined
    name ``_UpperCAmelCase`` as ``type``/``nargs``.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ))
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''')
    # positional
    parser.add_argument(
        '''training_script''', type=str, help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ), )
    # rest from the training program: swallow everything after the script path
    parser.add_argument('''training_script_args''', nargs=REMAINDER)
    return parser.parse_args()
def __snake_case ( ):
    """Entry point: parse args, import the training script as a module and
    spawn its ``_mp_fn`` across the requested TPU cores.

    Fixes: every binding went to a throwaway local so the subsequent lines
    raised NameError, and the ``__main__`` guard called the undefined name
    ``main``.
    """
    # NOTE(review): ``parse_args`` is defined above only under the obfuscated
    # name ``__snake_case`` (which this def shadows) — restore the original
    # function names before use.
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned script sees its own arguments plus cores.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    __snake_case()
| 350 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def a ( __snake_case : "np.ndarray | list" ):
    """Element-wise ReLU: ``max(0, x)`` over the input.

    Returns a numpy array (``np.maximum`` broadcasts the scalar 0 over the
    input, which may be a list, scalar or ndarray).

    Fixes: the original body read the undefined name ``_lowerCAmelCase`` and
    the ``__main__`` guard called the undefined name ``relu``.
    """
    return np.maximum(0, __snake_case )
if __name__ == "__main__":
    print(np.array(a([-1, 0, 5])))  # --> [0, 0, 5]
| 608 |
from math import factorial
class _UpperCAmelCase :
    """Dual number for forward-mode automatic differentiation.

    NOTE(review): ``__init__`` repeats the parameter name ``a__`` (duplicate
    argument names are a SyntaxError), bodies read unbound names (``real``,
    ``rank``, ``other``, ``cur``, ``n`` …), results are assigned to throwaway
    locals, and the methods construct ``Dual(...)`` although this class is
    named ``_UpperCAmelCase``. The block is mechanically obfuscated; restore
    the original names before use.
    """
    def __init__( self , a__ , a__ ):
        # real part plus a list of dual coefficients (or an explicit list)
        A_ : Optional[int] = real
        if isinstance(a__ , a__ ):
            A_ : str = [1] * rank
        else:
            A_ : str = rank
    def __repr__( self ):
        # e.g. "3+1E1+2E2" — real part followed by dual coefficients
        return (
            F"""{self.real}+"""
            F"""{'+'.join(str(a__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )
    def _lowerCamelCase ( self ):
        # drop trailing zero dual coefficients
        A_ : List[str] = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , a__ )
    def __add__( self , a__ ):
        # scalar add touches only the real part; dual add pads to equal length
        if not isinstance(a__ , a__ ):
            return Dual(self.real + other , self.duals )
        A_ : List[Any] = self.duals.copy()
        A_ : Tuple = other.duals.copy()
        if len(a__ ) > len(a__ ):
            o_dual.extend([1] * (len(a__ ) - len(a__ )) )
        elif len(a__ ) < len(a__ ):
            s_dual.extend([1] * (len(a__ ) - len(a__ )) )
        A_ : str = []
        for i in range(len(a__ ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , a__ )
    a = __add__
    def __sub__( self , a__ ):
        # implemented via addition with the negation
        return self + other * -1
    def __mul__( self , a__ ):
        # scalar multiply scales everything; dual multiply convolves coeffs
        if not isinstance(a__ , a__ ):
            A_ : Optional[int] = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , a__ )
        A_ : Tuple = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , a__ )
    a = __mul__
    def __truediv__( self , a__ ):
        # only scalar division is supported
        if not isinstance(a__ , a__ ):
            A_ : List[str] = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , a__ )
        raise ValueError
    def __floordiv__( self , a__ ):
        # only scalar floor-division is supported
        if not isinstance(a__ , a__ ):
            A_ : Optional[int] = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , a__ )
        raise ValueError
    def __pow__( self , a__ ):
        # non-negative integer powers via repeated multiplication
        if n < 0 or isinstance(a__ , a__ ):
            raise ValueError("""power must be a positive integer""" )
        if n == 0:
            return 1
        if n == 1:
            return self
        A_ : str = self
        for _ in range(n - 1 ):
            x *= self
        return x
def _lowerCAmelCase ( func , position , order ):
    """Exact ``order``-th derivative of ``func`` at ``position`` via dual numbers.

    Raises ValueError when ``func`` is not callable, ``position`` is not
    numeric, or ``order`` is not an int.

    Fixes: the original signature repeated the same parameter name three times
    (a SyntaxError) and also shadowed the function's own name.
    """
    if not callable(func ):
        raise ValueError("""differentiate() requires a function as input for func""" )
    if not isinstance(func , (float, int) ) and not isinstance(position , (float, int) ):
        raise ValueError("""differentiate() requires a float as input for position""" )
    if not isinstance(order , int ):
        raise ValueError("""differentiate() requires an int as input for order""" )
    # NOTE(review): ``Dual`` is not defined in this file — the class above is
    # named ``_UpperCAmelCase``; restore the class name before use.
    result = func(Dual(position , 1 ) )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
| 569 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __A ( lowerCamelCase__ ):
    """Processor pairing an Encodec feature extractor with a T5 tokenizer.

    NOTE(review): several signatures repeat the placeholder parameter name
    ``a__`` (including ``*a__ , **a__`` — both duplicate argument names and a
    SyntaxError), and bodies read names (``kwargs``, ``args``, ``audio``,
    ``text`` …) that are never bound. The block is mechanically obfuscated;
    restore the original parameter names before use.
    """
    UpperCAmelCase__ = """EncodecFeatureExtractor"""
    UpperCAmelCase__ = ("""T5Tokenizer""", """T5TokenizerFast""")
    def __init__( self , a__ , a__):
        """Wire up the feature extractor as the default current processor."""
        super().__init__(a__ , a__)
        _lowerCamelCase : Union[str, Any] = self.feature_extractor
        _lowerCamelCase : Optional[Any] = False
    def __snake_case ( self , a__=None , a__=None , a__=True):
        """Forward decoder-prompt-id lookup to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=a__ , language=a__ , no_timestamps=a__)
    def __call__( self , *a__ , **a__):
        """Dispatch text to the tokenizer and audio to the feature extractor."""
        if self._in_target_context_manager:
            return self.current_processor(*a__ , **a__)
        _lowerCamelCase : Any = kwargs.pop('''audio''' , a__)
        _lowerCamelCase : str = kwargs.pop('''sampling_rate''' , a__)
        _lowerCamelCase : List[str] = kwargs.pop('''text''' , a__)
        if len(a__) > 0:
            _lowerCamelCase : Optional[int] = args[0]
            _lowerCamelCase : str = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
        if text is not None:
            _lowerCamelCase : str = self.tokenizer(a__ , **a__)
        if audio is not None:
            _lowerCamelCase : Dict = self.feature_extractor(a__ , *a__ , sampling_rate=a__ , **a__)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # merge the audio features into the text encoding
            _lowerCamelCase : Dict = audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                _lowerCamelCase : Union[str, Any] = audio_inputs['''padding_mask''']
            return inputs
    def __snake_case ( self , *a__ , **a__):
        """Decode generated audio (via _decode_audio) or text (via tokenizer)."""
        _lowerCamelCase : str = kwargs.pop('''audio''' , a__)
        _lowerCamelCase : str = kwargs.pop('''padding_mask''' , a__)
        if len(a__) > 0:
            _lowerCamelCase : str = args[0]
            _lowerCamelCase : Optional[int] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a__ , padding_mask=a__)
        else:
            return self.tokenizer.batch_decode(*a__ , **a__)
    def __snake_case ( self , *a__ , **a__):
        """Forward single-sequence decode to the tokenizer."""
        return self.tokenizer.decode(*a__ , **a__)
    def __snake_case ( self , a__ , a__ = None):
        """Strip padded samples from generated audio using the padding mask."""
        _lowerCamelCase : int = to_numpy(a__)
        _lowerCamelCase : Dict = audio_values.shape
        if padding_mask is None:
            return list(a__)
        _lowerCamelCase : List[Any] = to_numpy(a__)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        _lowerCamelCase : List[Any] = seq_len - padding_mask.shape[-1]
        _lowerCamelCase : Any = 1 - self.feature_extractor.padding_value
        _lowerCamelCase : Tuple = np.pad(a__ , ((0, 0), (0, difference)) , '''constant''' , constant_values=a__)
        _lowerCamelCase : Tuple = audio_values.tolist()
        for i in range(a__):
            _lowerCamelCase : List[str] = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            _lowerCamelCase : int = sliced_audio.reshape(a__ , -1)
        return audio_values
| 718 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCamelCase = logging.get_logger(__name__)
@dataclass
class __A :
    """Training arguments for the (deprecated) GLUE dataset wrapper below.

    NOTE(review): every field is assigned to the same obfuscated attribute
    name ``UpperCAmelCase__`` — each assignment clobbers the previous one, so
    the dataclass ends up with a single field; restore the original field
    names (task_name, data_dir, max_seq_length, overwrite_cache).
    """
    UpperCAmelCase__ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    UpperCAmelCase__ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    UpperCAmelCase__ = field(
        default=128 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    UpperCAmelCase__ = field(
        default=lowerCamelCase__ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __snake_case ( self):
        """Normalize the task name to lower case after init."""
        # NOTE(review): the result is bound to a throwaway local, not back to
        # self.task_name — obfuscation damage; verify.
        _lowerCamelCase : Dict = self.task_name.lower()
class __A ( lowerCamelCase__ ):
    """Dataset split marker (train/dev/test).

    NOTE(review): all three values are assigned to the same obfuscated
    attribute name ``UpperCAmelCase__`` — only the last survives; restore the
    original member names (train, dev, test).
    """
    UpperCAmelCase__ = """train"""
    UpperCAmelCase__ = """dev"""
    UpperCAmelCase__ = """test"""
class __A ( lowerCamelCase__ ):
    """Deprecated GLUE dataset: tokenizes examples and caches the features.

    NOTE(review): ``__init__`` repeats the placeholder parameter name ``a__``
    (duplicate argument names are a SyntaxError), results go to throwaway
    locals instead of ``self`` attributes, and bodies read names (``args``,
    ``mode``, ``tokenizer``, ``i`` …) that are never bound. The block is
    mechanically obfuscated; restore the original names before use.
    """
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42
    def __init__( self , a__ , a__ , a__ = None , a__ = Split.train , a__ = None , ):
        """Load features from the cache file or build them from raw examples."""
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , a__ , )
        _lowerCamelCase : Optional[Any] = args
        _lowerCamelCase : Tuple = glue_processors[args.task_name]()
        _lowerCamelCase : Any = glue_output_modes[args.task_name]
        if isinstance(a__ , a__):
            try:
                _lowerCamelCase : List[Any] = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''')
        # Load data features from cache or dataset file
        _lowerCamelCase : Tuple = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        _lowerCamelCase : int = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            _lowerCamelCase, _lowerCamelCase : Union[str, Any] = label_list[2], label_list[1]
        _lowerCamelCase : str = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _lowerCamelCase : Any = cached_features_file + '''.lock'''
        with FileLock(a__):
            if os.path.exists(a__) and not args.overwrite_cache:
                _lowerCamelCase : Any = time.time()
                _lowerCamelCase : Optional[Any] = torch.load(a__)
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    _lowerCamelCase : List[str] = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    _lowerCamelCase : str = self.processor.get_test_examples(args.data_dir)
                else:
                    _lowerCamelCase : List[Any] = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    _lowerCamelCase : List[Any] = examples[:limit_length]
                _lowerCamelCase : List[str] = glue_convert_examples_to_features(
                    a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
                _lowerCamelCase : int = time.time()
                torch.save(self.features , a__)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
    def __len__( self):
        """Number of cached feature rows."""
        return len(self.features)
    def __getitem__( self , a__):
        """Return the feature row at the given index."""
        return self.features[i]
    def __snake_case ( self):
        """Return the label list for the configured task."""
        return self.label_list
| 613 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (limit : int = 100_0000 ):
    """Totient sieve: return ``sum(phi(n) for n in range(2, limit + 1))``.

    Fixes: the original parameter was named ``_UpperCAmelCase`` (and annotated
    ``str``) while the body read the unbound name ``limit``, the inner range
    used the undefined step ``_lowerCAmelCase`` instead of ``i``, and the
    ``__main__`` guard called the undefined name ``solution``.
    """
    # phi[i] starts at i - 1, which is already correct for primes; composite
    # entries are corrected by the sieve below.
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        # phi[i] still untouched  <=>  i is prime
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    print(_SCREAMING_SNAKE_CASE())
| 4 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCAmelCase_ :
"""simple docstring"""
    def __init__( self : Tuple , snake_case_ : List[str] ):
        # Accepts a dict, a path to a JSON file, or (originally) a base64
        # string of the deepspeed config.
        # NOTE(review): results go to a throwaway local instead of self.config,
        # ``isinstance(x, x)`` lost the dict check, ``baseaa.urlsafe_baadecode``
        # is a garbled ``base64.urlsafe_b64decode``, ``config_file_or_dict`` /
        # ``config`` are unbound, and ``self.set_stage_and_offload`` is not
        # defined under that name in this class — obfuscation damage; verify.
        if isinstance(snake_case_ , snake_case_ ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            snake_case__ : Optional[Any] = deepcopy(snake_case_ )
        elif os.path.exists(snake_case_ ):
            with io.open(snake_case_ , """r""" , encoding="""utf-8""" ) as f:
                snake_case__ : Union[str, Any] = json.load(snake_case_ )
        else:
            try:
                snake_case__ : List[Any] = baseaa.urlsafe_baadecode(snake_case_ ).decode("""utf-8""" )
                snake_case__ : Optional[int] = json.loads(snake_case_ )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" )
        snake_case__ : str = config
        self.set_stage_and_offload()
def lowerCamelCase ( self : int ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
snake_case__ : Union[str, Any] = self.get_value("""zero_optimization.stage""" , -1 )
# offload
snake_case__ : Optional[Any] = False
if self.is_zeroa() or self.is_zeroa():
snake_case__ : int = set(["""cpu""", """nvme"""] )
snake_case__ : Any = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
snake_case__ : List[Any] = True
def lowerCamelCase ( self : Any , snake_case_ : Optional[Any] ):
snake_case__ : Optional[Any] = self.config
# find the config node of interest if it exists
snake_case__ : List[Any] = ds_key_long.split(""".""" )
snake_case__ : Tuple = nodes.pop()
for node in nodes:
snake_case__ : Optional[Any] = config.get(snake_case_ )
if config is None:
return None, ds_key
return config, ds_key
def lowerCamelCase ( self : List[Any] , snake_case_ : int , snake_case_ : List[str]=None ):
snake_case__ , snake_case__ : Dict = self.find_config_node(snake_case_ )
if config is None:
return default
return config.get(snake_case_ , snake_case_ )
def lowerCamelCase ( self : Dict , snake_case_ : Optional[int] , snake_case_ : int=False ):
snake_case__ : Dict = self.config
# find the config node of interest if it exists
snake_case__ : str = ds_key_long.split(""".""" )
for node in nodes:
snake_case__ : Union[str, Any] = config
snake_case__ : List[Any] = config.get(snake_case_ )
if config is None:
if must_exist:
raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(snake_case_ )
def lowerCamelCase ( self : str , snake_case_ : Optional[Any] ):
snake_case__ : Dict = self.get_value(snake_case_ )
return False if value is None else bool(snake_case_ )
def lowerCamelCase ( self : List[str] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = self.get_value(snake_case_ )
return False if value is None else not bool(snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
return self._stage == 2
def lowerCamelCase ( self : Dict ):
return self._stage == 3
def lowerCamelCase ( self : List[Any] ):
return self._offload
class UpperCAmelCase_:
    """Routes backpropagation through a DeepSpeed engine.

    Fix: the original ``backward`` declared its positional argument and its
    ``**`` argument under the same name, which is a ``SyntaxError``.
    """

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class UpperCAmelCase_(_a):
    """Optimizer wrapper whose step/zero_grad are no-ops under DeepSpeed.

    NOTE(review): the base-class call arguments were collapsed by obfuscation;
    ``device_placement=False, scaler=None`` is the presumed original
    (accelerate's DeepSpeedOptimizerWrapper) -- confirm against the superclass.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        # Only DeepSpeed optimizers expose `overflow`; remember whether this one does.
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether the last step overflowed (False when overflow isn't tracked)."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class UpperCAmelCase_(_a):
    """Scheduler wrapper whose ``step`` is a no-op under DeepSpeed.

    Fix: the original ``__init__`` declared both parameters under the same
    name, which is a ``SyntaxError``.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCAmelCase_:
    """Placeholder optimizer: records hyper-parameters for DeepSpeed to consume.

    Fix: the original ``__init__`` reused a single parameter name four times
    (including for ``**kwargs``), which is a ``SyntaxError``.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params  # parameters (or parameter groups) to optimize
        self.lr = lr  # learning rate handed over to DeepSpeed
        self.weight_decay = weight_decay
        self.kwargs = kwargs  # any extra optimizer options, kept verbatim
class UpperCAmelCase_:
    """Placeholder LR scheduler: records settings for DeepSpeed to consume.

    Fix: the original ``__init__`` reused a single parameter name for every
    argument (including ``**kwargs``), which is a ``SyntaxError``.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps  # None lets DeepSpeed infer it
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs  # any extra scheduler options, kept verbatim
| 374 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : Optional[Any] = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__snake_case : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 615 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor) -> None:
    """Convert a temporal-UNet checkpoint for hopper-medium-v2 into diffusers format.

    Args:
        hor: planning horizon of the checkpoint; only 32 and 128 are supported.

    Side effects: reads the raw checkpoint from a hard-coded local path and
    writes ``diffusion_pytorch_model.bin`` plus ``config.json`` under
    ``hub/hopper-medium-v2/unet/hor{hor}/``.

    Fixes vs. the original: the parameter is actually named ``hor`` (the body
    already used it), remapped weights are stored back under their new keys
    instead of being discarded, and the function name matches the ``unet(32)``
    call in ``__main__``.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        raise ValueError(f"Unsupported horizon: {hor}")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys() )}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    # Old and new state dicts are ordered identically, so pair old -> new keys by position.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for old_key, new_key in mapping.items():
        state_dict[new_key] = state_dict.pop(old_key)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function() -> None:
    """Convert the hopper-medium-v2 value-function checkpoint into diffusers format.

    Reads the raw checkpoint (already a plain state dict) from a hard-coded
    local path and writes weights + config under
    ``hub/hopper-medium-v2/value_function/``.

    Fixes vs. the original: remapped weights are stored back under their new
    keys instead of being discarded, and the function name matches the
    ``value_function()`` call in ``__main__``.
    """
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    state_dict = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys() )}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    # Old and new state dicts are ordered identically, so pair old -> new keys by position.
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for old_key, new_key in mapping.items():
        state_dict[new_key] = state_dict.pop(old_key)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 615 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def _lowerCAmelCase(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Return the Schur complement ``C - B^T A^{-1} B`` of ``[[A, B], [B^T, C]]``.

    Args:
        mat_a: square block ``A`` of shape (n, n).
        mat_b: off-diagonal block ``B`` of shape (n, m).
        mat_c: block ``C``; only its column count (m) is checked.
        pseudo_inv: optional precomputed (pseudo-)inverse of ``A``; when
            ``None`` the inverse is computed with ``np.linalg.inv``.

    Raises:
        ValueError: when shapes are incompatible or ``A`` is singular.

    Fix vs. the original: all four parameters were declared under the single
    name ``__magic_name__`` (a ``SyntaxError``); the names the body already
    used are restored.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Unit tests for the Schur-complement helper defined above.

    NOTE(review): obfuscation collapsed every local to ``lowercase`` (so only
    the last assignment survives) and the call arguments to the undefined name
    ``UpperCAmelCase__``; as written these tests raise ``NameError``.
    Presumably each test built matrices a, b, c and called the helper with
    them -- restore the locals before relying on this suite.
    """

    def lowerCamelCase_ ( self : List[str] ):
        '''Determinant identity det([[A,B],[B^T,C]]) == det(A) * det(S).'''
        lowercase : Optional[Any] =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase : Dict =np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase : Union[str, Any] =np.array([[2, 1], [6, 3]] )
        lowercase : Union[str, Any] =schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        lowercase : Optional[Any] =np.block([[a, b], [b.T, c]] )
        lowercase : Union[str, Any] =np.linalg.det(UpperCAmelCase__ )
        lowercase : List[Any] =np.linalg.det(UpperCAmelCase__ )
        lowercase : List[str] =np.linalg.det(UpperCAmelCase__ )
        self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s )

    def lowerCamelCase_ ( self : Any ):
        '''Mismatched row counts for A and B must raise ValueError.'''
        lowercase : Any =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase : Optional[Any] =np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase : Tuple =np.array([[2, 1], [6, 3]] )
        with self.assertRaises(UpperCAmelCase__ ):
            schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Optional[Any] ):
        '''Mismatched column counts for B and C must raise ValueError.'''
        lowercase : List[Any] =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase : Optional[int] =np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase : Optional[Any] =np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(UpperCAmelCase__ ):
            schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 92 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    Args:
        length: how many numbers to produce; must be a positive integer.

    Raises:
        ValueError: if ``length`` is not a positive integer.

    Fixes vs. the original: the type check now runs before the ``<= 0``
    comparison, so non-numeric input raises ``ValueError`` instead of
    ``TypeError``; and the name matches the ``hexagonal_numbers`` calls in
    ``__main__`` (the def was previously obfuscated to another name).
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 487 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")

# Module-level logger for this example script.
_UpperCAmelCase : int =logging.getLogger(__name__)
@dataclass
class snake_case__:
    """Data/preprocessing arguments for the XNLI example.

    NOTE(review): obfuscation collapsed every field below to the single name
    ``SCREAMING_SNAKE_CASE__`` (so only the last declaration survives) and
    several defaults reference ``__lowerCAmelCase``, which is name-mangled
    inside the class body and undefined. Presumably the original fields were
    max_seq_length / overwrite_cache / pad_to_max_length / max_train_samples /
    max_eval_samples / max_predict_samples -- confirm against the upstream
    run_xnli example before use.
    """
    # presumably max_seq_length -- TODO confirm
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=128, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    # presumably overwrite_cache -- TODO confirm
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=__lowerCAmelCase, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    # presumably pad_to_max_length -- TODO confirm
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=__lowerCAmelCase, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    # presumably max_train_samples -- TODO confirm
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=__lowerCAmelCase, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    # presumably max_eval_samples -- TODO confirm
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=__lowerCAmelCase, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    # presumably max_predict_samples -- TODO confirm
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=__lowerCAmelCase, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
@dataclass
class snake_case__:
    """Model/tokenizer arguments for the XNLI example.

    NOTE(review): same obfuscation issue as the data-arguments class above --
    every field shares the name ``SCREAMING_SNAKE_CASE__`` and several
    defaults reference the undefined mangled name ``__lowerCAmelCase``.
    Presumably the original fields were model_name_or_path / language /
    train_language / config_name / tokenizer_name / cache_dir /
    do_lower_case / use_fast_tokenizer / model_revision / use_auth_token /
    ignore_mismatched_sizes -- confirm against the upstream run_xnli example.
    """
    SCREAMING_SNAKE_CASE__ : str = field(
        default=__lowerCAmelCase, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    SCREAMING_SNAKE_CASE__ : str = field(
        default=__lowerCAmelCase, metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=__lowerCAmelCase, metadata={"""help""": """Train language if it is different from the evaluation language."""} )
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=__lowerCAmelCase, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=__lowerCAmelCase, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=__lowerCAmelCase, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    SCREAMING_SNAKE_CASE__ : Optional[bool] = field(
        default=__lowerCAmelCase, metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""}, )
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=__lowerCAmelCase, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    SCREAMING_SNAKE_CASE__ : str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=__lowerCAmelCase, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=__lowerCAmelCase, metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""}, )
def lowerCAmelCase ( )-> Optional[Any]:
    """Entry point of the XNLI text-classification example: parse arguments,
    load the XNLI splits, fine-tune, evaluate and/or predict.

    NOTE(review): obfuscation collapsed most locals to ``lowerCAmelCase_``, so
    the body reads names (``parser``, ``model_args``, ``data_args``,
    ``training_args``, ``train_dataset``, ``eval_dataset``,
    ``predict_dataset``, ``label_list``, ``num_labels``, ``config``,
    ``tokenizer``, ``model``, ``metric``, ``data_collator``, ``trainer`` ...)
    that are never bound under those names -- this function cannot run as
    written; restore the original locals before use.
    """
    lowerCAmelCase_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowerCAmelCase_ : List[str] = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''' , lowerCAmelCase_ )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowerCAmelCase_ : int = training_args.get_process_log_level()
    logger.setLevel(lowerCAmelCase_ )
    datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
    transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    lowerCAmelCase_ : Optional[int] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCAmelCase_ : int = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            lowerCAmelCase_ : Union[str, Any] = load_dataset(
                '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            lowerCAmelCase_ : Union[str, Any] = load_dataset(
                '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowerCAmelCase_ : Optional[int] = train_dataset.features['''label'''].names

    if training_args.do_eval:
        lowerCAmelCase_ : Tuple = load_dataset(
            '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowerCAmelCase_ : str = eval_dataset.features['''label'''].names

    if training_args.do_predict:
        lowerCAmelCase_ : int = load_dataset(
            '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowerCAmelCase_ : Tuple = predict_dataset.features['''label'''].names

    # Labels
    lowerCAmelCase_ : str = len(lowerCAmelCase_ )

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCAmelCase_ : str = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase_ , idalabel={str(lowerCAmelCase_ ): label for i, label in enumerate(lowerCAmelCase_ )} , labelaid={label: i for i, label in enumerate(lowerCAmelCase_ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowerCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        lowerCAmelCase_ : Optional[int] = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        lowerCAmelCase_ : Optional[Any] = False

    def preprocess_function(lowerCAmelCase_ ):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''] , examples['''hypothesis'''] , padding=lowerCAmelCase_ , max_length=data_args.max_seq_length , truncation=lowerCAmelCase_ , )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            lowerCAmelCase_ : Union[str, Any] = min(len(lowerCAmelCase_ ) , data_args.max_train_samples )
            lowerCAmelCase_ : Optional[Any] = train_dataset.select(range(lowerCAmelCase_ ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            lowerCAmelCase_ : Tuple = train_dataset.map(
                lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(lowerCAmelCase_ ) ) , 3 ):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            lowerCAmelCase_ : Optional[int] = min(len(lowerCAmelCase_ ) , data_args.max_eval_samples )
            lowerCAmelCase_ : int = eval_dataset.select(range(lowerCAmelCase_ ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            lowerCAmelCase_ : List[Any] = eval_dataset.map(
                lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            lowerCAmelCase_ : Optional[int] = min(len(lowerCAmelCase_ ) , data_args.max_predict_samples )
            lowerCAmelCase_ : Tuple = predict_dataset.select(range(lowerCAmelCase_ ) )
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            lowerCAmelCase_ : Tuple = predict_dataset.map(
                lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )

    # Get the metric function
    lowerCAmelCase_ : Any = evaluate.load('''xnli''' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowerCAmelCase_ ):
        lowerCAmelCase_ : List[str] = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase_ ) else p.predictions
        lowerCAmelCase_ : Union[str, Any] = np.argmax(lowerCAmelCase_ , axis=1 )
        return metric.compute(predictions=lowerCAmelCase_ , references=p.label_ids )

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        lowerCAmelCase_ : Optional[Any] = default_data_collator
    elif training_args.fpaa:
        lowerCAmelCase_ : Optional[Any] = DataCollatorWithPadding(lowerCAmelCase_ , pad_to_multiple_of=8 )
    else:
        lowerCAmelCase_ : Tuple = None

    # Initialize our Trainer
    lowerCAmelCase_ : Optional[int] = Trainer(
        model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )

    # Training
    if training_args.do_train:
        lowerCAmelCase_ : str = None
        if training_args.resume_from_checkpoint is not None:
            lowerCAmelCase_ : Tuple = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCAmelCase_ : List[str] = last_checkpoint
        lowerCAmelCase_ : Tuple = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
        lowerCAmelCase_ : Tuple = train_result.metrics
        lowerCAmelCase_ : int = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
        )
        lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics('''train''' , lowerCAmelCase_ )
        trainer.save_metrics('''train''' , lowerCAmelCase_ )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowerCAmelCase_ : List[str] = trainer.evaluate(eval_dataset=lowerCAmelCase_ )

        lowerCAmelCase_ : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ )
        lowerCAmelCase_ : Any = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )

        trainer.log_metrics('''eval''' , lowerCAmelCase_ )
        trainer.save_metrics('''eval''' , lowerCAmelCase_ )

    # Prediction
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        lowerCAmelCase_ : str = trainer.predict(lowerCAmelCase_ , metric_key_prefix='''predict''' )

        lowerCAmelCase_ : Union[str, Any] = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCAmelCase_ )
        )
        lowerCAmelCase_ : List[Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )

        trainer.log_metrics('''predict''' , lowerCAmelCase_ )
        trainer.save_metrics('''predict''' , lowerCAmelCase_ )

        lowerCAmelCase_ : List[str] = np.argmax(lowerCAmelCase_ , axis=1 )
        lowerCAmelCase_ : Union[str, Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(lowerCAmelCase_ ):
                    lowerCAmelCase_ : Union[str, Any] = label_list[item]
                    writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main() | 718 |
# Value/symbol pairs in descending order, including the subtractive forms
# (CM, CD, XC, ...), consumed greedily by int_to_roman below.
_UpperCAmelCase : Dict = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    >>> roman_to_int("XIV")
    14

    Fix vs. the original: the body indexed an undefined name ``roman`` while
    the parameter carried an obfuscated name, and this function shared its
    name with the converter below (so it was shadowed and unreachable).
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one is a subtractive pair (IV, IX, XC, ...).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation.

    >>> int_to_roman(1994)
    'MCMXCIV'

    Fix vs. the original: ``divmod`` was called with the same argument twice
    instead of ``divmod(number, arabic)``, and its results were never bound
    to usable names.
    """
    result = []
    for arabic, roman in _UpperCAmelCase:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences before character-level comparison.
# NOTE(review): obfuscated to the empty string here; the class below reads
# `self.sentence_delimiter`, which `__init__` assigns to a throwaway name --
# confirm the intended wiring before use.
__a = ''

if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    # Older jiwer lacks ReduceToListOfListOfChars; emulate it with a custom transform.
    class A__ ( tr.AbstractTransform ):
        """Split each sentence into characters, joining sentences with a delimiter."""
        def __init__( self : Optional[Any] , lowerCAmelCase__ : str = " " ) -> str:
            """simple docstring"""
            _UpperCAmelCase : Dict = sentence_delimiter

        def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : str ) -> Dict:
            """Turn one sentence string into a list of its characters."""
            return list(lowerCAmelCase__ )

        def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Dict:
            """Flatten a list of sentences into one character list with delimiters between sentences."""
            _UpperCAmelCase : str = []
            for sent_idx, sentence in enumerate(lowerCAmelCase__ ):
                chars.extend(self.process_string(lowerCAmelCase__ ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars

    __a = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # Modern jiwer ships the character-level reduction natively.
    __a = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
__a = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__a = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """Character Error Rate (CER) metric computed with ``jiwer.compute_measures``.

    Fixes vs. the obfuscated source: both methods shared one name (the first was
    shadowed), ``_compute``'s three parameters all had the same name (a
    SyntaxError), and the same mangled name was passed as data AND as the
    transforms; dataset-dump residue fused onto the last line was removed.
    """

    def _info(self) -> datasets.MetricInfo:
        # Declares the expected inputs so `datasets` can validate compute() calls.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return the character error rate of `predictions` against `references`.

        When `concatenate_texts` is True all sentences are scored as one text,
        which is more accurate; otherwise per-pair measures are accumulated.
        """
        # NOTE(review): `cer_transform` must be the module-level jiwer transform
        # built by the tr.Compose assignment above; the obfuscator destroyed its
        # binding — restore that module variable's name to `cer_transform`.
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
# Module logger. The obfuscated source bound this AND the archive map below to
# the same name `__a`, so the logger object was immediately clobbered.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json URL.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class DebertaV2Config(PretrainedConfig):
    """Configuration for DeBERTa-v2 models.

    Fixes vs. the obfuscated source: the base class `UpperCamelCase` was
    undefined (now the imported PretrainedConfig); the `model_type` attribute
    had lost its name; and every `__init__` parameter shared one name (a
    SyntaxError) while the body read `hidden_size`, `vocab_size`, ... which
    were never bound. Parameter names/order are restored to match the body and
    the defaults visible in the original signature.
    """

    # Key used by the auto-classes to resolve this configuration.
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: older checkpoints stored pos_att_type as "a|b".
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Falls back to hidden_size when the checkpoint does not override it.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2.

    Fixes vs. the obfuscated source: the base `UpperCamelCase` was undefined
    (now the imported OnnxConfig); all three members shared one method name so
    only the last survived; `generate_dummy_inputs` declared many parameters
    with the same name (a SyntaxError); dataset-dump residue fused onto the
    final line was removed.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes depend on the task: multiple-choice adds a `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # token_type_ids is only an input when the model actually embeds types.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        # Drop token_type_ids when the config does not use type embeddings.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of *img* with its contrast adjusted by *level*.

    Fixes vs. the obfuscated source: both parameters shared one name (a
    SyntaxError); the def was named `A__` while the script below calls
    `change_contrast`; parameter names are restored to match the body.
    """
    # Standard contrast correction factor for 8-bit channels.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Map a single channel value, pivoting around mid-gray (128).
        return int(128 + factor * (c - 128))

    # Image.point applies `contrast` to every pixel channel value.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170; the obfuscated source bound the result to
        # `lowercase` but then saved the undefined `cont_img`.
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE_(ProcessorMixin):
    """Processor pairing a LayoutLMv3 image processor with a LayoutLMv3 tokenizer.

    Fixes vs. the obfuscated source: the base class `_lowercase` was undefined
    (now the imported ProcessorMixin); the three ProcessorMixin class attributes
    were all named `__magic_name__`; `__call__` declared ~19 parameters with one
    shared name (a SyntaxError) while the body read `text`, `boxes`, ...;
    assignment targets collapsed to a single throwaway name are restored so
    `images` / `encoded_inputs` actually flow through.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        # Accept the deprecated kwarg as a fallback for image_processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge their outputs."""
        # verify input: with apply_ocr the boxes/labels come from OCR, not the caller
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer; with OCR, caller text becomes the question
        # and the OCR'd words become the second sequence.
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped
        # back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"""
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for CodeGen (slow and fast tokenizers).

    Fixes vs. the obfuscated source: the base `__lowerCamelCase` was undefined
    (now the imported TokenizerTesterMixin); the five mixin attributes were all
    named `__snake_case`; and every method was named `A`, so earlier defs were
    shadowed and unittest discovered no `test_*` methods at all. Hook and test
    names expected by the mixin/unittest are restored; all runtime strings and
    expected values are unchanged.
    """

    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Deliberately skipped: pretokenized inputs don't mix with byte-level BPE here.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests: padding="max_length" without a pad token must raise
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
from collections.abc import Callable
import numpy as np
def _a ( SCREAMING_SNAKE_CASE__ : Callable , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = int(np.ceil((x_end - xa) / step_size ) )
SCREAMING_SNAKE_CASE__ : Tuple = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__ : Tuple = ya
SCREAMING_SNAKE_CASE__ : Dict = xa
for k in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 663 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus the audio sample rate
# and sample size each model was trained with. The obfuscated source bound this
# to a throwaway name while download()/main() below read `MODELS_MAP`.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert an (alpha, sigma) noise pair to the crash-schedule timestep in [0, 1].

    Fixes vs. the obfuscated source: both parameters shared one name (a
    SyntaxError), the def's name was mangled while get_crash_schedule() below
    calls `alpha_sigma_to_t`, and `torch.atana` is not a torch function — the
    intended call is torch.atan2(sigma, alpha).
    """
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """Map a linear time grid `t` to the crash-diffusion noise schedule.

    sigma follows a sin^2 ramp and alpha is chosen so alpha^2-ish mass is
    preserved; the pair is then converted back to timesteps via
    alpha_sigma_to_t (the obfuscated source renamed this def while main()
    below still calls `get_crash_schedule`).
    """
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    """Bare attribute container standing in for the training-time `global_args`
    namespace (main() below instantiates `Object()` and sets fields on it; the
    obfuscated source named this class `_A` with an undefined base `__a`)."""

    pass
class DiffusionUncond(nn.Module):
    """Module matching the layout of the original training checkpoints so their
    state_dict loads directly (main() below calls `DiffusionUncond`; the
    obfuscated source named this `_A` and dropped the attribute bindings)."""

    def __init__(self, global_args):
        super().__init__()
        # `diffusion_ema` holds the EMA weights that main() actually converts.
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Fetch an official checkpoint by name into the current directory via wget
    and return its local path (main() below calls `download`; the obfuscated
    source mangled both the def's name and its parameter)."""
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
# Layer-index translation tables consumed by rename() below; the obfuscated
# source bound every one of them to the same throwaway name. The mapping of
# dict to name is grounded in rename()'s usage: keys 1-6 for down blocks
# (int < 7), 8-13 for up blocks (int > 7), 1-13 at max depth for the mid
# block, and 0-6 at depth 0.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# Sub-layer renames inside residual conv blocks (convert_resconv_naming).
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
# Attention sub-layer renames; qkv_proj fans out into three diffusers keys.
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Translate a residual-conv sub-layer name to its diffusers equivalent.

    `name` must be either "skip..." or "main.{digit}..."; anything else is a
    naming error. (The obfuscated source had `name.replace(A__, A__)` with the
    same undefined name for both arguments; the RES_CONV_MAP lookups visible in
    the body fix what was intended.)
    """
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    # "main.N" is exactly 6 characters; swap that prefix for its mapped name.
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Translate an attention sub-layer name via ATTN_MAP.

    Scalar map values yield a single renamed string; list values (the fused
    qkv projection) yield a list of renamed strings, one per target key.
    (The obfuscated source passed the same undefined name to startswith,
    isinstance and replace; the restore follows the ATTN_MAP iteration.)
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Translate one original checkpoint key into its diffusers key(s).

    Returns a single string, or a list of strings when an attention qkv weight
    fans out into several diffusers keys. (The obfuscated source gave both
    parameters the same name — a SyntaxError — and collapsed every local to
    one throwaway name; locals are restored from how the body uses them.)
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    # `depth` counts how many nested "net.3."/"main.7." wrappers we strip.
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block: split the leading layer number (1 or 2 digits) from the rest.
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        # May return a list when a fused qkv projection fans out.
        string_left = convert_attn_naming(string_left)
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Build a new state dict with every key translated via rename().

    Keys ending in "kernel" belong to fixed resampling filters and are dropped;
    keys that rename() expands into a list are attention qkv weights and get
    reshaped/split by transform_conv_attns(). (The obfuscated source bound the
    renamed key and the dict to the same throwaway name.)
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    """Store conv-attention tensor `v` under the diffusers key(s) in `name`.

    A single-element `name` is a plain projection: 3-D tensors are Conv1d
    weights whose trailing kernel axis is dropped (Conv -> Linear), anything
    else is a bias stored as-is. A three-element `name` is a fused qkv tensor
    split into equal thirds along dim 0. (The obfuscated source's three
    parameters shared one name — a SyntaxError — and the dict stores were
    collapsed to a throwaway local.)
    """
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight: drop the kernel dimension of the 1x1 conv
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices: dim 0 stacks query, key and value
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """Convert a dance-diffusion checkpoint into a diffusers DanceDiffusionPipeline.

    Loads the original EMA model, renames its weights into the diffusers UNet
    layout, then generates audio with both models and asserts they match before
    optionally saving the pipeline. (The obfuscated source collapsed every
    local to one throwaway name and mangled the def's name; the `__main__`
    guard below calls `main`.)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    # Minimal stand-in for the training `global_args` namespace.
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    # Only the EMA weights are converted.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    # Fixed resampling kernels are the only diffusers keys we don't fill.
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    # CLI entry point. The obfuscated source bound the parser and the parsed
    # args to a throwaway name while calling `parser.add_argument` / `main(args)`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both constants were collapsed onto one obfuscated name, so the
# second assignment (32) overwrites the first; upstream these were presumably
# two distinct constants (train batch-size cap vs. eval batch size) — confirm.
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : int = 32
def UpperCAmelCase__ ( A__ ) -> int:
    """Convert a byte count to whole mebibytes, truncating toward zero.

    Fix: the original body returned ``int(x / 2**20)`` while the parameter is
    named ``A__`` — ``x`` was an undefined name (NameError at first call).
    """
    return int(A__ / 2**20 )
class _A :
    """Context manager that measures GPU memory use of the wrapped region.

    On entry it resets CUDA's peak-memory gauge and records the currently
    allocated bytes; on exit it exposes:
      * ``begin`` / ``end`` / ``peak`` — raw byte counters, and
      * ``used`` / ``peaked``          — deltas converted to MB via ``bamb``.

    Fix: the obfuscated original dropped every ``self.`` prefix, so the
    counters were assigned to throwaway locals and callers reading
    ``tracemalloc.begin`` / ``.used`` / ``.peaked`` would get AttributeError.
    """

    def __enter__( self ) -> "_A":
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__( self , *SCREAMING_SNAKE_CASE__ ) -> None:
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        # ``bamb`` (bytes->MB) is expected to be defined earlier in this file.
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCAmelCase__ ( accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 ) -> Dict:
    """Build GLUE/MRPC train and eval dataloaders from slices of the dataset.

    Fix: the obfuscated signature repeated ``A__`` five times, which is invalid
    Python (duplicate argument names), and the body referenced the undefined
    name ``datasets`` after assigning ``load_dataset``'s result elsewhere.

    :param accelerator: the ``Accelerator`` (used for TPU-aware padding).
    :param batch_size: train batch size.
    :param model_name: HF model id whose tokenizer to load.
    :param n_train: number of training examples to keep.
    :param n_val: number of validation examples to keep.
    :returns: ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        "glue" , "mrpc" , split={"train": f'train[:{n_train}]', "validation": f'validation[:{n_val}]'} )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    # 32 was the (obfuscated) module-level eval batch-size constant.
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=32 )
    return train_dataloader, eval_dataloader
def UpperCAmelCase__ ( config , args ) -> Optional[int]:
    """Train a sequence classifier on MRPC while tracking peak GPU memory.

    Fix: the obfuscated signature ``(A__ , A__)`` repeated a parameter name
    (invalid Python); restored to ``(config, args)`` per the call site.

    :param config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
    :param args: parsed CLI namespace (model path, output dir, limits).
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed )
    # ``get_dataloaders``, ``TorchTracemalloc`` and ``bamb`` are expected to be
    # defined earlier in this file (call-site names kept from the original).
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer: use the DeepSpeed dummy when DeepSpeed owns it.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (DeepSpeed dummy when DeepSpeed provides one)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step , batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
            json.dump(train_total_peak_memory , f )
def UpperCAmelCase__ ( ) -> Any:
    """CLI entry point: parse arguments and launch the training run.

    Fix: every ``type=A__``/``default=A__``/``required=A__`` referenced an
    undefined module-level name; restored to concrete ``str``/``float``/``int``
    types and literal defaults consistent with the help texts.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--peak_memory_upper_bound" , type=float , default=None , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
    parser.add_argument(
        "--n_train" , type=int , default=320 , help="Number of training examples to use." , )
    parser.add_argument(
        "--n_val" , type=int , default=160 , help="Number of validation examples to use." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    # ``training_function`` name kept from the original call site.
    training_function(config , args )
if __name__ == "__main__":
    # Fix: ``main`` is undefined in this file — the zero-argument entry point
    # is the last function bound to ``UpperCAmelCase__`` above.
    UpperCAmelCase__()
| 274 | 1 |
def __UpperCAmelCase( lowercase_ ):
    """Return True iff the graph is bipartite (2-colorable).

    ``lowercase_`` is an adjacency mapping keyed 0..n-1. Each connected
    component is colored via DFS, then every edge is checked for a same-color
    endpoint pair (which would witness an odd cycle).

    Fix: the obfuscated body indexed an undefined ``SCREAMING_SNAKE_CASE__``
    instead of the parameter, and the inner ``dfs`` repeated a parameter name
    (invalid Python).
    """
    visited = [False] * len(lowercase_ )
    color = [-1] * len(lowercase_ )

    def dfs(vertex , c ):
        # Color ``vertex`` and recurse into unvisited neighbours with the
        # opposite color.
        visited[vertex] = True
        color[vertex] = c
        for neighbour in lowercase_[vertex]:
            if not visited[neighbour]:
                dfs(neighbour , 1 - c )

    # Color every connected component (the graph may be disconnected).
    for i in range(len(lowercase_ ) ):
        if not visited[i]:
            dfs(i , 0 )
    # An edge joining two equally colored vertices means not bipartite.
    for i in range(len(lowercase_ ) ):
        for j in lowercase_[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_lowerCamelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# Fix: the original called ``check_bipartite_dfs(graph)`` but neither name
# exists in this file; the function is ``__UpperCAmelCase`` and the graph
# variable is ``_lowerCamelCase``.
print(__UpperCAmelCase(_lowerCamelCase))
| 114 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both constants were collapsed onto one obfuscated name, so the
# second assignment (32) overwrites the first; upstream these were presumably
# two distinct batch-size constants — confirm against the original script.
UpperCAmelCase_ : Optional[Any] = 16
UpperCAmelCase_ : List[str] = 32
def snake_case_ ( accelerator , batch_size = 16 , model_name = "bert-base-cased" ):
    """Build GLUE/MRPC train and eval dataloaders.

    Fix: the obfuscated signature repeated ``SCREAMING_SNAKE_CASE__`` three
    times (invalid Python), and the body referenced the undefined name
    ``datasets`` after storing ``load_dataset``'s result elsewhere.

    :param accelerator: the ``Accelerator`` (used for TPU-aware padding).
    :param batch_size: train batch size.
    :param model_name: HF model id whose tokenizer to load.
    :returns: ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    # 32 was the (obfuscated) module-level eval batch-size constant.
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=32 )
    return train_dataloader, eval_dataloader
def snake_case_ ( accelerator , model , eval_dataloader , metric ):
    """Run one evaluation pass and return MRPC accuracy.

    Fix: the obfuscated signature repeated ``SCREAMING_SNAKE_CASE__`` four
    times (invalid Python); restored to the argument order used at the call
    sites: ``(accelerator, model, eval_dataloader, metric)``.
    """
    model.eval()
    samples_seen = 0
    for step , batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                # Trim the duplicated tail of the final gathered batch.
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def snake_case_ ( config , args ):
    """Train on MRPC, checkpointing each epoch; optionally resume and verify.

    Fix: the obfuscated signature ``(SCREAMING_SNAKE_CASE__ ,
    SCREAMING_SNAKE_CASE__)`` repeated a parameter name (invalid Python);
    restored to ``(config, args)`` per the call site.

    When ``args.resume_from_checkpoint`` is set, the saved state is reloaded,
    re-evaluated, and compared against the recorded ``state_<epoch>.json``.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    # ``get_dataloaders`` / ``evaluation_loop`` names kept from the original
    # call sites; they are expected to be defined earlier in this file.
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer: use the DeepSpeed dummy when DeepSpeed owns it.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (DeepSpeed dummy when DeepSpeed provides one)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("""glue""" , """mrpc""" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        # Recover the epoch number from the checkpoint folder name "epoch_<n>...".
        epoch_string = args.resume_from_checkpoint.split("""epoch_""" )[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("""resumed checkpoint performance:""" , accuracy )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step , batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["""accuracy"""] = accuracy
        state["""lr"""] = lr_scheduler.get_lr()[0]
        state["""optimizer_lr"""] = optimizer.param_groups[0]["""lr"""]
        state["""epoch"""] = epoch
        state["""overall_step"""] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
                json.dump(state , f )
def snake_case_ ( ):
    """CLI entry point: parse arguments and launch the checkpointing run.

    Fix: every ``type=SCREAMING_SNAKE_CASE__``/``default=...`` referenced an
    undefined module-level name; restored to concrete ``str``/``int`` types
    and literal defaults consistent with the help texts.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--resume_from_checkpoint""" , type=str , default=None , help="""If the training should continue from a checkpoint folder.""" , )
    parser.add_argument(
        """--partial_train_epoch""" , type=int , default=None , help="""If passed, the training will stop after this number of epochs.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    # ``training_function`` name kept from the original call site.
    training_function(config , args )
if __name__ == "__main__":
    # Fix: ``main`` is undefined in this file — the zero-argument entry point
    # is the last function bound to ``snake_case_`` above.
    snake_case_()
| 533 | 0 |
'''simple docstring'''
import os
def A_ ( ) -> int:
    """Project Euler maximum path sum: read ``triangle.txt`` located next to
    this script and return the largest top-to-bottom path total.

    Works bottom-up row by row: each cell accumulates the best reachable
    parent, so the answer is the maximum of the last row.

    Fix: the original called ``os.path.realpath(__SCREAMING_SNAKE_CASE)``,
    an undefined name — it must be ``__file__``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            # Out-of-range parents contribute 0 (left edge / right edge).
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_a = max(number_a , a[i - 1][j - 1] if j > 0 else 0 )
            a[i][j] += number_a
    return max(a[-1] )
if __name__ == "__main__":
    # Fix: ``solution`` is undefined in this file; the function is ``A_``.
    print(A_())
| 499 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for ``return_tensors`` in the tests below.
# NOTE(review): obfuscation renamed the constant to ``A__`` while the test
# methods still read ``FRAMEWORK`` — confirm against the upstream test file.
if is_torch_available():
    A__ : List[str] ='pt'
elif is_tf_available():
    A__ : List[str] ='tf'
else:
    A__ : List[str] ='jax'
class __A ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Test suite for the byte-level ``PerceiverTokenizer``.

    NOTE(review): aggressive identifier obfuscation has damaged this class —
    every method is named ``lowercase_`` (later defs shadow earlier ones),
    several signatures repeat the same parameter name (invalid Python), and
    bodies reference names such as ``tokenizer`` or ``self.perceiver_tokenizer``
    that are never bound under those names here. The docstrings below describe
    the apparent intent of each method; confirm against the upstream test file
    before relying on any of them.
    """

    # Mixin hooks: tokenizer class under test; no fast (Rust) tokenizer exists.
    lowerCamelCase =PerceiverTokenizer
    lowerCamelCase =False

    def lowercase_( self : int ):
        """Create a default tokenizer and persist it into the mixin's temp dir."""
        super().setUp()
        __A : Optional[int] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase_( self : Any ):
        """Tokenizer loaded from the public ``deepmind/language-perceiver`` checkpoint."""
        return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )

    def lowercase_( self : Optional[Any] , **lowerCamelCase : Union[str, Any] ):
        """Reload the tokenizer saved during setUp, with optional kwarg overrides."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )

    def lowercase_( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=20 , lowerCamelCase : List[str]=5 ):
        """Build a ``(text, ids)`` pair of cleanly decodable tokens for round-trip tests."""
        __A : Optional[Any] = []
        for i in range(len(lowerCamelCase ) ):
            try:
                __A : str = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        __A : List[str] = list(filter(lambda lowerCamelCase : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , lowerCamelCase ) )
        __A : List[str] = list(filter(lambda lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase ) , lowerCamelCase ) )
        if max_length is not None and len(lowerCamelCase ) > max_length:
            __A : Optional[Any] = toks[:max_length]
        if min_length is not None and len(lowerCamelCase ) < min_length and len(lowerCamelCase ) > 0:
            while len(lowerCamelCase ) < min_length:
                __A : Dict = toks + toks
        # toks_str = [t[1] for t in toks]
        __A : Optional[int] = [t[0] for t in toks]
        # Ensure consistency
        __A : Optional[Any] = tokenizer.decode(lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
        if " " not in output_txt and len(lowerCamelCase ) > 1:
            __A : int = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase )
            )
        if with_prefix_space:
            __A : Union[str, Any] = """ """ + output_txt
        __A : Any = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
        return output_txt, output_ids

    def lowercase_( self : Optional[Any] ):
        """Encode/decode round trip on Unicode text (Euro sign, accented letters)."""
        __A : int = self.perceiver_tokenizer
        __A : List[str] = """Unicode €."""
        __A : Dict = tokenizer(lowerCamelCase )
        __A : Any = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
        self.assertEqual(encoded["""input_ids"""] , lowerCamelCase )
        # decoding
        __A : Dict = tokenizer.decode(lowerCamelCase )
        self.assertEqual(lowerCamelCase , """[CLS]Unicode €.[SEP]""" )
        __A : int = tokenizer("""e è é ê ë""" )
        __A : str = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
        self.assertEqual(encoded["""input_ids"""] , lowerCamelCase )
        # decoding
        __A : Optional[Any] = tokenizer.decode(lowerCamelCase )
        self.assertEqual(lowerCamelCase , """[CLS]e è é ê ë[SEP]""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )

    def lowercase_( self : Union[str, Any] ):
        """Batched ``__call__`` pads to a fixed length and returns a BatchEncoding."""
        __A : Optional[Any] = self.perceiver_tokenizer
        __A : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        __A : Any = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
        # fmt: on
        __A : Optional[int] = tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors=lowerCamelCase )
        self.assertIsInstance(lowerCamelCase , lowerCamelCase )
        if FRAMEWORK != "jax":
            __A : str = list(batch.input_ids.numpy()[0] )
        else:
            __A : Optional[Any] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(lowerCamelCase , lowerCamelCase )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def lowercase_( self : Optional[Any] ):
        """Encoder-only tokenizer must emit input_ids/attention_mask and no decoder_* keys."""
        __A : str = self.perceiver_tokenizer
        __A : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __A : Optional[int] = tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors=lowerCamelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""" , lowerCamelCase )
        self.assertIn("""attention_mask""" , lowerCamelCase )
        self.assertNotIn("""decoder_input_ids""" , lowerCamelCase )
        self.assertNotIn("""decoder_attention_mask""" , lowerCamelCase )

    def lowercase_( self : Optional[Any] ):
        """``text_target`` inputs honour max_length padding/truncation."""
        __A : Optional[int] = self.perceiver_tokenizer
        __A : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        __A : Union[str, Any] = tokenizer(
            text_target=lowerCamelCase , max_length=32 , padding="""max_length""" , truncation=lowerCamelCase , return_tensors=lowerCamelCase )
        self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    def lowercase_( self : Optional[Any] ):
        """Runtime-added tokens and ``model_max_length`` survive a save/load round trip."""
        __A : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        __A : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                __A : Optional[int] = tempfile.mkdtemp()
                __A : List[str] = """ He is very happy, UNwant\u00E9d,running"""
                __A : str = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                tokenizer.save_pretrained(lowerCamelCase )
                __A : Optional[int] = tokenizer.__class__.from_pretrained(lowerCamelCase )
                __A : Optional[int] = after_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertListEqual(lowerCamelCase , lowerCamelCase )
                shutil.rmtree(lowerCamelCase )
        __A : Optional[Any] = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                __A : List[Any] = tempfile.mkdtemp()
                __A : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                __A : str = tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                __A : List[Any] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                tokenizer.save_pretrained(lowerCamelCase )
                __A : Tuple = tokenizer.__class__.from_pretrained(lowerCamelCase )
                __A : Union[str, Any] = after_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertListEqual(lowerCamelCase , lowerCamelCase )
                self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                __A : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(lowerCamelCase )

    def lowercase_( self : int ):
        """Hand-edited special_tokens_map.json / tokenizer_config.json are honoured by ``from_pretrained``."""
        __A : str = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(lowerCamelCase )
                with open(os.path.join(lowerCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    __A : int = json.load(lowerCamelCase )
                with open(os.path.join(lowerCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    __A : Any = json.load(lowerCamelCase )
                __A : str = [f"<extra_id_{i}>" for i in range(1_25 )]
                __A : int = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                __A : Any = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(lowerCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(lowerCamelCase , lowerCamelCase )
                with open(os.path.join(lowerCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(lowerCamelCase , lowerCamelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __A : int = tokenizer_class.from_pretrained(
                    lowerCamelCase , )
                self.assertIn(
                    """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __A : int = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCamelCase )]
                __A : str = tokenizer_class.from_pretrained(
                    lowerCamelCase , additional_special_tokens=lowerCamelCase , )
                self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )

    def lowercase_( self : Union[str, Any] ):
        """Decoding a lone continuation byte (id 178) yields the replacement character."""
        __A : Dict = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_78] ) , """�""" )

    def lowercase_( self : Optional[int] ):
        """Intentionally skipped — not applicable to a byte-level tokenizer (presumed)."""
        pass

    def lowercase_( self : Optional[Any] ):
        """Intentionally skipped — not applicable to a byte-level tokenizer (presumed)."""
        pass

    def lowercase_( self : Any ):
        """Intentionally skipped — not applicable to a byte-level tokenizer (presumed)."""
        pass

    def lowercase_( self : Union[str, Any] ):
        """Intentionally skipped — not applicable to a byte-level tokenizer (presumed)."""
        pass

    def lowercase_( self : Optional[int] ):
        """Convert a token sequence (including special tokens) back into a string."""
        __A : Optional[int] = self.get_tokenizers(fast=lowerCamelCase , do_lower_case=lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                __A : Optional[int] = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
                __A : List[Any] = tokenizer.convert_tokens_to_string(lowerCamelCase )
                self.assertIsInstance(lowerCamelCase , lowerCamelCase )
| 499 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def snake_case ( ):
    """Parse the command-line arguments for the image-generation script.

    Fix: the obfuscated original assigned the parser to ``__lowercase`` but
    then called ``parser.add_argument`` and returned ``args`` — both undefined
    names. Argument ``type=`` values also referenced an undefined name and are
    restored to ``str``/``int`` consistent with their defaults.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-m""" , """--pretrained_model_name_or_path""" , type=str , default=None , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
    parser.add_argument(
        """-c""" , """--caption""" , type=str , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
    parser.add_argument(
        """-n""" , """--images_num""" , type=int , default=4 , help="""How much images to generate.""" , )
    parser.add_argument(
        """-s""" , """--seed""" , type=int , default=42 , help="""Seed for random process.""" , )
    parser.add_argument(
        """-ci""" , """--cuda_id""" , type=int , default=0 , help="""cuda_id.""" , )
    args = parser.parse_args()
    return args
def snake_case ( imgs , rows , cols ):
    """Paste ``rows * cols`` equally sized PIL images into one grid image.

    Args:
        imgs: sequence of PIL images, all the same size.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new RGB PIL image of size (cols * w, rows * h).

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.

    Original defects fixed: the three parameters all shared one mangled name
    (a SyntaxError) and the body referenced undefined locals.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("""The specified number of rows and columns are not correct.""")
    w, h = imgs[0].size
    grid = Image.new("""RGB""", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Row-major placement: column = i % cols, row = i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def snake_case ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Run the Stable Diffusion pipeline and arrange the outputs into a grid.

    Args:
        pipeline: a loaded StableDiffusionPipeline.
        prompt: text prompt to render.
        guidance_scale: classifier-free guidance strength.
        num_inference_steps: denoising steps.
        num_images_per_prompt: how many images to sample.
        seed: RNG seed for the torch generator.

    Returns:
        (grid, images): the pasted grid image and the raw list of images.

    Original defects fixed: all six parameters shared one mangled name
    (a SyntaxError) and every call argument was an undefined identifier.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    # Grid layout: as close to square as possible.
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# NOTE(review): module-level driver for INT8 Stable Diffusion image generation.
# Identifier mangling bound every result to `__UpperCamelCase`, so later lines
# that read `tokenizer`, `text_encoder`, `vae`, `unet`, `pipeline`, `grid`,
# `images` and `dirname` reference undefined names; also `parse_args`,
# `image_grid` and `generate_images` are defined above under mangled names.
__UpperCamelCase : List[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : Tuple = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
__UpperCamelCase : Optional[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
__UpperCamelCase : Any = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
__UpperCamelCase : Union[str, Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
__UpperCamelCase : int = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: always report images as safe.
__UpperCamelCase : Optional[Any] = lambda images, clip_input: (images, False)
# Prefer a neural-compressor quantized UNet checkpoint when one exists.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    __UpperCamelCase : Optional[Any] = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    __UpperCamelCase : Tuple = unet.to(torch.device("""cuda""", args.cuda_id))
__UpperCamelCase : int = pipeline.to(unet.device)
__UpperCamelCase , __UpperCamelCase : str = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid plus each individual image under a caption-derived directory.
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
__UpperCamelCase : str = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
| 80 |
"""simple docstring"""
def a__ ( first , second ) -> int:
    """Add two integers using only bitwise operations (no ``+``).

    Classic carry-propagation: XOR adds without carry, AND finds the carry
    bits, and the carry is shifted left and re-added until it is zero.

    Args:
        first: first addend.
        second: second addend.

    Returns:
        first + second.

    Note: with Python's unbounded ints this terminates for non-negative
    inputs; a negative ``second`` never reaches 0 and would loop forever.

    Original defects fixed: both parameters shared one mangled name (a
    SyntaxError) and the carry was stored in a mangled variable but read
    back as the undefined name ``c`` (a NameError).
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo: read two integers and print their bitwise sum.
    __A = int(input("Enter the first number: ").strip())
    __A = int(input("Enter the second number: ").strip())
    # NOTE(review): both inputs are bound to `__A` (mangling), yet the f-string
    # reads `first`/`second` and calls `add`, which is defined above as `a__`.
    print(F'''{add(first, second) = }''')
| 346 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __lowerCAmelCase ( _UpperCamelCase ):
    # Mock SageMaker launch configuration used by the conversion tests below.
    # NOTE(review): every field name was mangled to `_UpperCamelCase` (which is
    # also the — undefined — base class), so in this form each line simply
    # rebinds the same class attribute; the values suggest the original fields
    # were compute_environment, profile flags, ec2 instance type, IAM role,
    # profile, region, num_machines, base job name, pytorch/transformers
    # versions, training script and the two argv fixtures.
    _UpperCamelCase : Optional[int] = ComputeEnvironment.AMAZON_SAGEMAKER
    _UpperCamelCase : str = True
    _UpperCamelCase : str = """ml.p3.2xlarge"""
    _UpperCamelCase : str = """accelerate_sagemaker_execution_role"""
    _UpperCamelCase : Tuple = """hf-sm"""
    _UpperCamelCase : List[str] = """us-east-1"""
    _UpperCamelCase : Union[str, Any] = 1
    _UpperCamelCase : Dict = """accelerate-sagemaker-1"""
    _UpperCamelCase : List[Any] = """1.6"""
    _UpperCamelCase : int = """4.4"""
    _UpperCamelCase : Optional[Any] = """train.py"""
    # argv fixture that _convert_nargs_to_dict should parse successfully.
    _UpperCamelCase : List[str] = [
        """--model_name_or_path""",
        """bert""",
        """--do_train""",
        """False""",
        """--epochs""",
        """3""",
        """--learning_rate""",
        """5e-5""",
        """--max_steps""",
        """50.5""",
    ]
    # argv fixture with bare flags mixed with valued options; expected to fail.
    _UpperCamelCase : int = [
        """--model_name_or_path""",
        """bert""",
        """--do_train""",
        """--do_test""",
        """False""",
        """--do_predict""",
        """--epochs""",
        """3""",
        """--learning_rate""",
        """5e-5""",
        """--max_steps""",
        """50.5""",
    ]
class __lowerCAmelCase ( unittest.TestCase ):
    # Tests _convert_nargs_to_dict on the two argv fixtures declared above.
    def _snake_case ( self ) -> Dict:
        """Verify the converted argument types and that malformed argv raises.

        NOTE(review): mangling broke this test — the result is bound to `a__`
        but the asserts read the undefined `converted_args`; `MockLaunchConfig`
        (the dataclass above lost its name) and `snake_case` (presumably the
        expected str/bool/int/float types) are undefined too.
        """
        a__ : Dict = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args["model_name_or_path"] , snake_case )
        assert isinstance(converted_args["do_train"] , snake_case )
        assert isinstance(converted_args["epochs"] , snake_case )
        assert isinstance(converted_args["learning_rate"] , snake_case )
        assert isinstance(converted_args["max_steps"] , snake_case )
        with pytest.raises(snake_case ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 629 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import table for the (deprecated) VAN model: submodule -> public names.
# Original defects fixed: the modeling names were assigned to a throwaway
# variable instead of being added to the table, the table itself was bound to
# a mangled name so the `_LazyModule(...)` call referenced an undefined
# `_import_structure`, and the lazy module was never installed in sys.modules.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
# Module-level logger (name mangled from the conventional `logger`).
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCAmelCase_ ):
    """Deprecated alias kept for backward compatibility.

    Thin shim over the image-processor base class that only emits a
    deprecation warning before delegating construction.

    Original defects fixed: ``*a_, **a_`` reused one parameter name (a
    SyntaxError) and the warning category was the undefined name ``A_``
    (restored to the conventional ``FutureWarning``).
    """

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 657 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
# Base search URL; the location is appended by the scraper below.
snake_case_ : Union[str, Any] = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def lowercase__( location: str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """Scrape Indeed job listings for the given location.

    Args:
        location: city appended to the module-level base ``url``.

    Yields:
        (job_title, company_name) tuples, one per listing found on the page.

    Original defects fixed: the parameter, the soup, the title and the company
    were all bound to one mangled name, leaving ``location``, ``job_title`` and
    ``company_name`` undefined where they were read.
    """
    soup = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
        job_title = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
        company_name = job.find("span" , {"class": "company"} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # NOTE(review): `fetch_jobs` is undefined — the generator above was
    # mangled to `lowercase__`; restore the real name before running.
    for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
        print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 138 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both the logger and the checkpoint map were mangled to the
# same name `_lowerCamelCase`, so the second assignment clobbers the first;
# the classes below read the conventional `logger`, which is undefined here.
_lowerCamelCase : str = logging.get_logger(__name__)
# Map of released BridgeTower checkpoints to their hosted config files.
_lowerCamelCase : List[Any] = {
    '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
    '''BridgeTower/bridgetower-base-itm-mlm''': (
        '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
    ),
}
class lowercase ( __A ):
    """Configuration for the BridgeTower vision encoder.

    Original defects fixed: every constructor argument read an undefined
    mangled name, the ``model_type`` class attribute had lost its name (it is
    read below via ``cls.model_type``), and the tuple returned by
    ``get_config_dict`` was never unpacked.
    """

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def __snake_case(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a pretrained checkpoint path or hub id."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            # NOTE(review): selecting "text_config" inside the *vision* config
            # reproduces the original line; looks suspicious — confirm upstream.
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class lowercase ( __A ):
    """Configuration for the BridgeTower text encoder (RoBERTa-style).

    Original defects fixed: every constructor argument read an undefined
    mangled name, the ``model_type`` attribute had lost its name, and the
    tuple returned by ``get_config_dict`` was never unpacked.
    """

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def __snake_case(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a pretrained checkpoint path or hub id."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            # A composite "bridgetower" config nests this one under text_config.
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class lowercase ( __A ):
    """Composite BridgeTower configuration holding the text and vision configs.

    Original defects fixed: every argument and local read an undefined mangled
    name (e.g. ``super().__init__(**UpperCamelCase__)``), and the ``to_dict``
    output keys were lost; restored from the attribute names the code itself
    assigns.
    """

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Legacy kwargs accepted but ignored (popped so super() does not choke).
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
        # NOTE(review): these class names are undefined in this mangled file
        # (the sub-configs above lost their names); kept as the original wrote them.
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def __snake_case(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 709 | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): this assignment was mangled — `os` is otherwise unused, so the
# original most likely set an environment variable (e.g. a TF log level) to "3";
# confirm against the upstream script.
_lowerCamelCase : List[Any] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are optional: report None when torch is not installed.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers is likewise optional.
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 647 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __lowercase ( __snake_case ):
    # Value-guided planning pipeline: denoises candidate trajectories with a
    # UNet while nudging each step along the gradient of a value function.
    # NOTE(review): identifier mangling is pervasive here — methods reuse
    # `__lowerCamelCase` for several parameters (a SyntaxError) and every
    # local is bound to `UpperCAmelCase` while bodies read the real names
    # (value_function, unet, x, key, ...). Left byte-identical; restore the
    # real identifiers before running.
    def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , ) -> Dict:
        """Store the models/env and precompute per-key dataset mean/std stats."""
        super().__init__()
        UpperCAmelCase = value_function
        UpperCAmelCase = unet
        UpperCAmelCase = scheduler
        UpperCAmelCase = env
        UpperCAmelCase = env.get_dataset()
        UpperCAmelCase = {}
        for key in self.data.keys():
            try:
                UpperCAmelCase = self.data[key].mean()
            except: # noqa: E722
                pass
        UpperCAmelCase = {}
        for key in self.data.keys():
            try:
                UpperCAmelCase = self.data[key].std()
            except: # noqa: E722
                pass
        UpperCAmelCase = env.observation_space.shape[0]
        UpperCAmelCase = env.action_space.shape[0]
    def _lowercase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict:
        """Normalize x_in using the stored mean/std for `key`."""
        return (x_in - self.means[key]) / self.stds[key]
    def _lowercase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
        """Invert the normalization for `key`."""
        return x_in * self.stds[key] + self.means[key]
    def _lowercase ( self : List[Any] , __lowerCamelCase : Any ) -> Any:
        """Recursively move dicts/tensors/arrays onto the UNet's device."""
        if type(__lowerCamelCase ) is dict:
            return {k: self.to_torch(__lowerCamelCase ) for k, v in x_in.items()}
        elif torch.is_tensor(__lowerCamelCase ):
            return x_in.to(self.unet.device )
        return torch.tensor(__lowerCamelCase , device=self.unet.device )
    def _lowercase ( self : str , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ) -> int:
        """Overwrite conditioned timesteps of the trajectory with fixed values."""
        for key, val in cond.items():
            UpperCAmelCase = val.clone()
        return x_in
    def _lowercase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ) -> Tuple:
        """Run the guided reverse-diffusion loop over all scheduler timesteps."""
        UpperCAmelCase = x.shape[0]
        UpperCAmelCase = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            UpperCAmelCase = torch.full((batch_size,) , __lowerCamelCase , device=self.unet.device , dtype=torch.long )
            for _ in range(__lowerCamelCase ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    UpperCAmelCase = self.value_function(x.permute(0 , 2 , 1 ) , __lowerCamelCase ).sample
                    UpperCAmelCase = torch.autograd.grad([y.sum()] , [x] )[0]
                    UpperCAmelCase = self.scheduler._get_variance(__lowerCamelCase )
                    UpperCAmelCase = torch.exp(0.5 * posterior_variance )
                    # scale the value-gradient ascent step by the model std
                    UpperCAmelCase = model_std * grad
                UpperCAmelCase = 0
                UpperCAmelCase = x.detach()
                UpperCAmelCase = x + scale * grad
                UpperCAmelCase = self.reset_xa(__lowerCamelCase , __lowerCamelCase , self.action_dim )
            UpperCAmelCase = self.unet(x.permute(0 , 2 , 1 ) , __lowerCamelCase ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            UpperCAmelCase = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , predict_epsilon=__lowerCamelCase )["""prev_sample"""]
            # apply conditions to the trajectory (set the initial state)
            UpperCAmelCase = self.reset_xa(__lowerCamelCase , __lowerCamelCase , self.action_dim )
            UpperCAmelCase = self.to_torch(__lowerCamelCase )
        return x, y
    def __call__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple=6_4 , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : List[str]=0.1 ) -> str:
        """Plan from an observation and return the highest-value first action."""
        UpperCAmelCase = self.normalize(__lowerCamelCase , """observations""" )
        UpperCAmelCase = obs[None].repeat(__lowerCamelCase , axis=0 )
        UpperCAmelCase = {0: self.to_torch(__lowerCamelCase )}
        UpperCAmelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        UpperCAmelCase = randn_tensor(__lowerCamelCase , device=self.unet.device )
        UpperCAmelCase = self.reset_xa(__lowerCamelCase , __lowerCamelCase , self.action_dim )
        UpperCAmelCase = self.to_torch(__lowerCamelCase )
        # run the diffusion process
        UpperCAmelCase , UpperCAmelCase = self.run_diffusion(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        # sort output trajectories by value
        UpperCAmelCase = y.argsort(0 , descending=__lowerCamelCase ).squeeze()
        UpperCAmelCase = x[sorted_idx]
        UpperCAmelCase = sorted_values[:, :, : self.action_dim]
        UpperCAmelCase = actions.detach().cpu().numpy()
        UpperCAmelCase = self.de_normalize(__lowerCamelCase , key="""actions""" )
        # select the action with the highest value
        if y is not None:
            UpperCAmelCase = 0
        else:
            # if we didn't run value guiding, select a random action
            UpperCAmelCase = np.random.randint(0 , __lowerCamelCase )
        UpperCAmelCase = denorm_actions[selected_index, 0]
        return denorm_actions
| 377 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
# Module-level logger (name mangled from the conventional `logger`).
lowercase = logging.getLogger(__name__)
class lowercase__ ( PretrainedConfig ):
    """Configuration for a MaskedBert model (BERT with prunable weight masks).

    Original defects fixed: every constructor parameter shared the mangled
    name ``snake_case`` (a SyntaxError) and the base class was the undefined
    name ``A`` — ``PretrainedConfig`` is imported at the top of the file.
    Parameter names were restored from the attribute assignments; defaults
    kept positionally from the original signature.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Mask-pruning specific knobs.
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 573 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
    """Feature type for fixed-language translation dicts.

    NOTE(review): mangling collapsed every field name to
    ``SCREAMING_SNAKE_CASE_`` (so each line rebinds the same attribute) and
    replaced the lost type annotations with ``42``/``None`` placeholders; the
    methods below read the real fields ``languages``/``id``/``dtype``/``_type``.
    """

    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = None
    # Automatically constructed
    SCREAMING_SNAKE_CASE_ = "dict"
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = field(default='Translation' , init=__A , repr=__A )
    def __call__( self ):
        """Arrow storage type: a struct with one string field per language."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def __lowerCamelCase( self ):
        """Flattened feature mapping: each language becomes a string Value."""
        from .features import Value
        return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
    """Feature type for translations with a variable set of languages.

    NOTE(review): mangling collapsed the field names (see the fixed-language
    class above) and rebound locals to throwaway names — e.g. in
    encode_example the zipped result is bound to ``_snake_case`` while the
    return reads the undefined ``languages``/``translations``.
    """

    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    # Automatically constructed
    SCREAMING_SNAKE_CASE_ = "dict"
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = field(default='TranslationVariableLanguages' , init=__A , repr=__A )
    def __lowerCamelCase( self ):
        """Post-init: sort/deduplicate the language list and cache its length."""
        _snake_case : Any = sorted(set(self.languages ) ) if self.languages else None
        _snake_case : Dict = len(self.languages ) if self.languages else None
    def __call__( self ):
        """Arrow storage type: parallel lists of language codes and texts."""
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
    def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
        """Validate and flatten a {lang: text-or-texts} dict into parallel lists."""
        _snake_case : Union[str, Any] = set(self.languages )
        if self.languages and set(UpperCamelCase__ ) - lang_set:
            raise ValueError(
                f'''Some languages in example ({", ".join(sorted(set(UpperCamelCase__ ) - lang_set ) )}) are not in valid set ({", ".join(UpperCamelCase__ )}).''' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        _snake_case : List[str] = []
        for lang, text in translation_dict.items():
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        _snake_case : Dict = zip(*sorted(UpperCamelCase__ ) )
        return {"language": languages, "translation": translations}
    def __lowerCamelCase( self ):
        """Flattened feature mapping for the two parallel string lists."""
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
| 709 |
def UpperCAmelCase ( nums ) -> list[list[int]]:
    """Return all permutations of ``nums`` (rotate-and-recurse algorithm).

    Pops the head, permutes the remainder recursively, appends the popped
    element to each sub-permutation, then rotates the head to the back so the
    next iteration starts from a different element. ``nums`` is restored to a
    rotation of itself, and the input list itself is left with its original
    elements.

    Original defects fixed: the parameter was mangled to ``A__`` while the
    body read the undefined ``nums``, and the recursive call targeted the
    lost name ``permute`` instead of this function.
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        head = nums.pop(0)
        permutations = UpperCAmelCase(nums)
        for perm in permutations:
            perm.append(head)
        result.extend(permutations)
        nums.append(head)
    return result
def UpperCAmelCase ( nums ) -> list[list[int]]:
    """Return all permutations of ``nums`` via in-place swap backtracking.

    At each depth, swaps every candidate element into position ``start``,
    recurses, then swaps back, so ``nums`` is unchanged on return.

    Original defects fixed: the parameter names were mangled (``A__``) while
    the body read ``start``/``nums``, the swap targets were bound to throwaway
    names, and the ``List[Any]`` return annotation referenced an unimported
    name. NOTE: this shares the (mangled) name of the recursive variant above;
    at module scope this definition shadows it.
    """
    def backtrack(start):
        # Fix positions [0, start); permute the suffix.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data in permute2 function
    # NOTE(review): `permutea` is undefined (both permutation functions above
    # were mangled to `UpperCAmelCase`), and the result is bound to
    # `UpperCAmelCase_` while the print reads the undefined `res`.
    UpperCAmelCase_ = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
| 519 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
# Emit transformers' info-level logs during conversion.
logging.set_verbosity_info()
def __snake_case ( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    """Convert a TensorFlow Funnel checkpoint to a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted PyTorch weights.
        base_model: if truthy, build the decoder-less FunnelBaseModel.

    Original defect fixed: all four parameters shared the mangled name
    ``lowercase`` (a SyntaxError), with argument names/order restored from
    the argparse call site below.
    """
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
    )
    # NOTE(review): the parser is bound to `lowercase__` (twice) while the
    # add_argument calls read `parser`, and `convert_tf_checkpoint_to_pytorch`
    # is defined above under the mangled name `__snake_case`.
    lowercase__ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 508 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __snake_case ( job ):
    """Summarize one GitHub Actions job dict into start/end/duration info.

    Args:
        job: job payload with ISO-8601 ``started_at``/``completed_at`` fields.

    Returns:
        dict with the raw timestamps and ``duration`` in whole minutes
        ("duration" is the key read by the sorting code at the bottom of the
        file; the timestamp keys mirror the input field names — confirm
        against the upstream script).

    Original defect fixed: the parameter was mangled to ``lowercase`` while
    the body indexed the undefined name ``job``, and every result was bound
    to the same throwaway name so the dict keys were lost.
    """
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def __snake_case ( workflow_run_id , token=None ):
    """Fetch per-job timing info for one GitHub Actions workflow run.

    Pages through the run's jobs (100 per page) and maps job name to the
    timing dict produced by the extractor above. Returns {} on any error.

    Args:
        workflow_run_id: numeric id of the workflow run.
        token: optional GitHub token for authenticated requests.

    Original defects fixed: both parameters shared the mangled name
    ``lowercase`` (a SyntaxError) and the per-job extractor was called with
    that mangled name instead of the loop variable ``job``.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # Remaining pages beyond the first 100 jobs.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'''&page={i + 2}''', headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
        return {}
if __name__ == "__main__":
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    # NOTE(review): results are bound to `lowercase__` while later lines read
    # `parser`/`args`/`job_time`, and `get_job_time` is defined above under
    # the mangled name `__snake_case`.
    lowercase__ = parser.parse_args()
    lowercase__ = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first.
    lowercase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f"""{k}: {v['duration']}""")
| 508 | 1 |
'''simple docstring'''
# Public re-exports for the diffusers models subpackage, gated on backends.
# NOTE(review): the numeric suffixes of the UNet module/class names were
# mangled (1D/2D/3D all became "aD"), producing duplicate imports below that
# shadow one another; restore the real unet_1d/unet_2d names before use.
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 432 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def A_( img: np.ndarray , pts_src: np.ndarray , pts_dst: np.ndarray , rows: int , cols: int ):
    """Apply an affine rotation/warp mapping three source points to three targets.

    Args:
        img: grayscale input image.
        pts_src: three source points (float32, shape (3, 2)).
        pts_dst: the corresponding destination points.
        rows: output image height.
        cols: output image width.

    Returns:
        The warped image of size (cols, rows).

    Original defect fixed: all five parameters shared the mangled name ``A``
    (a SyntaxError) and the body read undefined names.
    """
    rotation_mat = cva.getAffineTransform(pts_src , pts_dst)
    return cva.warpAffine(img , rotation_mat , (rows, cols))
if __name__ == "__main__":
    # NOTE(review): results below are bound to `lowerCAmelCase` while later
    # lines read the real names (`gray_img`, `img_rows`, `ptsa`, ...), and the
    # warp helper above was mangled to `A_` while `get_rotation` is called here.
    # read original image
    lowerCAmelCase : int = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    lowerCAmelCase : Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    lowerCAmelCase , lowerCAmelCase : str = gray_img.shape
    # set different points to rotate image
    lowerCAmelCase : Dict = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.floataa)
    lowerCAmelCase : int = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.floataa)
    lowerCAmelCase : List[str] = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.floataa)
    lowerCAmelCase : Union[str, Any] = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.floataa)
    # add all rotated images in a list
    lowerCAmelCase : Optional[int] = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    lowerCAmelCase : int = plt.figure(1)
    lowerCAmelCase : Optional[int] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 432 | 1 |
class UpperCamelCase__ :
    """Edit (Levenshtein) distance solver with two strategies.

    ``min_dist_top_down`` memoizes the classic recursion; ``min_dist_bottom_up``
    fills the full DP table iteratively. Both count insert/delete/replace as
    cost 1.

    Original defects fixed: every method's parameters shared one mangled name
    (a SyntaxError), both words were stored under the same attribute, and the
    public method names were lost — restored from the call sites at the bottom
    of the file (``solver.min_dist_top_down`` / ``solver.min_dist_bottom_up``).
    """

    def __init__( self ) -> None:
        self.worda = ""
        self.wordb = ""
        self.dp = []  # memo table; -1 marks "not computed yet" for top-down

    def __min_dist_top_down_dp( self, m: int, n: int ) -> int:
        """Memoized distance between worda[:m+1] and wordb[:n+1]."""
        if m == -1:
            # worda prefix exhausted: insert the remaining n+1 characters.
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down( self, worda: str, wordb: str ) -> int:
        """Edit distance via memoized recursion."""
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up( self, worda: str, wordb: str ) -> int:
        """Edit distance via iterative (m+1) x (n+1) table fill."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive smoke test for both DP variants.
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 344 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)

# Root logger; the stream handler below is attached to it.
logger = logging.getLogger()
def get_results(output_dir):
    """Load and return the metrics dict from `all_results.json` in `output_dir`.

    Raises:
        ValueError: if the file does not exist.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
# Echo log records to stdout so they are captured by the test harness.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    """End-to-end checks that example scripts run on TPU via ``xla_spawn``."""

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 161 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# Hold out 20% of the rows (kept for parity with the original script; the
# polynomial fit below is trained on the full X/y).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit a degree-4 polynomial regression on the full dataset.
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial() -> None:
    """Plot the raw data points (red) against the polynomial fit (blue)."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()
    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 294 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config locations for released PoolFormer checkpoints.
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """
    Configuration for a PoolFormer model: patch-embedding geometry
    (patch_size/stride/padding), per-stage depths and hidden sizes, pooling
    size, MLP ratio and layer-scale settings.
    """

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        # Mutable list defaults kept to match the upstream signature.
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        # Store every hyper-parameter on the instance so PretrainedConfig can
        # serialize it (the obfuscated original assigned to throwaway locals).
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for PoolFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single 4-D image input with dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs.
        return 2e-3
| 294 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
# Metric metadata strings; the leading-underscore names are referenced by the
# CodeEval metric class below.
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''

_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''

_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''

_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''

_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    """Execute generated code candidates against reference tests and report pass@k."""

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its test case; return (pass@k dict, raw results)."""
        # Executing untrusted code must be explicitly opted into.
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    # Each program is the candidate followed by its unit test.
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        # Per task: how many candidates ran, and how many passed.
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Unbiased pass@k estimator (Codex paper, arXiv:2107.03374).

    Args:
        num_samples: int or per-task sequence of candidate counts ``n``.
        num_correct: per-task sequence of passing-candidate counts ``c``.
        k: number of candidates considered per task.

    Returns:
        np.ndarray of per-task pass@k estimates.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """1 - comb(n - c, k) / comb(n, k), computed as a stable product."""
        if n - c < k:
            # Too few failing candidates to fill a k-sample without a pass.
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        # A single n applies to every task.
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """SentencePiece-based BARTpho tokenizer with a reduced fairseq-style
    monolingual vocabulary mapped on top of the full sentencepiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                # dict.txt lines are "<token> <count>"; only the token matters.
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; carry its serialized proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask with 1 at positions of special tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token types: everything is segment 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        # Size of the reduced (fairseq) vocabulary, not the full sp model.
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the reduced vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the reduced vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the sentencepiece model and the monolingual dict into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 694 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public symbols it provides.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Project Euler 85: area of the grid whose rectangle count is closest to `target`.

    An a x b grid contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is the
    n-th triangle number. For each candidate side a we estimate the matching b
    with the quadratic formula and test its floor and ceiling.
    """
    triangle_numbers: list[int] = [0]
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # best rectangle count found so far, and the area of the grid producing it
    best_product = 0
    area = 0

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # estimate of b via the quadratic formula on T(b) ~= target / T(a)
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)  # largest integer <= b_estimate
        b_ceil = ceil(b_estimate)  # smallest integer >= b_estimate
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
| 376 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> public symbols it provides.
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 269 |
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : float = 0 ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Tuple = row, column
__UpperCamelCase : Tuple = [[default_value for c in range(lowerCamelCase__ )] for r in range(lowerCamelCase__ )]
def __str__( self : List[Any] ):
"""simple docstring"""
__UpperCamelCase : Dict = f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
__UpperCamelCase : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__UpperCamelCase : int = max(lowerCamelCase__ , len(str(lowerCamelCase__ ) ) )
__UpperCamelCase : Union[str, Any] = f'%{max_element_length}s'
# Make string and return
def single_line(lowerCamelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
__UpperCamelCase : List[Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase__ ) for row_vector in self.array )
return s
def __repr__( self : Any ):
"""simple docstring"""
return str(self )
def a ( self : str , lowerCamelCase__ : tuple[int, int] ):
"""simple docstring"""
if not (isinstance(lowerCamelCase__ , (list, tuple) ) and len(lowerCamelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Union[str, Any] , lowerCamelCase__ : tuple[int, int] ):
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Union[str, Any] , lowerCamelCase__ : tuple[int, int] , lowerCamelCase__ : float ):
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase__ )
__UpperCamelCase : str = value
def __add__( self : int , lowerCamelCase__ : Matrix ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert self.row == another.row and self.column == another.column
# Add
__UpperCamelCase : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__UpperCamelCase : Optional[int] = self[r, c] + another[r, c]
return result
def __neg__( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__UpperCamelCase : Any = -self[r, c]
return result
def __sub__( self : Tuple , lowerCamelCase__ : Matrix ):
"""simple docstring"""
return self + (-another)
def __mul__( self : Tuple , lowerCamelCase__ : int | float | Matrix ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , (int, float) ): # Scalar multiplication
__UpperCamelCase : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__UpperCamelCase : Tuple = self[r, c] * another
return result
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): # Matrix multiplication
assert self.column == another.row
__UpperCamelCase : List[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__UpperCamelCase : Any = f'Unsupported type given for another ({type(lowerCamelCase__ )})'
raise TypeError(lowerCamelCase__ )
def transpose(self):
    """Return the transpose of this matrix.

    Renamed from the obfuscated ``a``: other code in this module calls
    ``v.transpose()``. Fix: each element is now written to ``result[c, r]``
    (the original assigned it to a dead local).
    """
    result = Matrix(self.column, self.row)
    for r in range(self.row):
        for c in range(self.column):
            result[c, r] = self[r, c]
    return result
def sherman_morrison(self, u: Matrix, v: Matrix):
    """Apply the Sherman-Morrison formula.

    Assuming ``self`` is A^(-1), returns (A + u v^T)^(-1), or ``None`` when
    the update is not invertible (denominator factor is zero).

    Renamed from the obfuscated ``a``: the test code calls
    ``ainv.sherman_morrison(u, v)``. Parameters restored to ``u``/``v`` to
    match the body's references.
    """
    # u, v should be column vectors of the same length as A's side.
    assert isinstance(u, Matrix) and isinstance(v, Matrix)
    assert self.row == self.column == u.row == v.row
    assert u.column == v.column == 1
    # Calculate the scalar denominator 1 + v^T A^(-1) u.
    v_t = v.transpose()
    numerator_factor = (v_t * self * u)[0, 0] + 1
    if numerator_factor == 0:
        return None  # It's not invertable
    return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a 3x3 identity."""
        # a^(-1): start from the identity matrix, so A = I.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v: column vectors for the rank-one update.
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test2()
| 269 | 1 |
"""simple docstring"""
# Molar gas constant R in J/(mol*K); the function body references this name.
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed sqrt(3*R*T/M) of a gas molecule.

    Renamed from the obfuscated ``a__``: the example code below calls
    ``rms_speed_of_molecule(temperature, molar_mass)``.

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass (must be > 0; the error message says kg/mol).

    Raises:
        Exception: on a negative temperature or non-positive molar mass.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen gas at 300 K (variable names restored from the
    # f-string and the call below, which reference them explicitly)
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch model + config.

    Renamed from the obfuscated ``a__``: the __main__ block calls
    ``convert_gpta_checkpoint_to_pytorch(...)``. Locals restored from their
    later uses (``model.state_dict()``, ``config.to_json_string()``, and the
    two print statements naming the dump paths).
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    # Fix: the declared flags are --gpt2_*, so the parsed attributes are
    # args.gpt2_checkpoint_path / args.gpt2_config_file (the original read
    # non-existent args.gpta_* attributes).
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
def __lowerCAmelCase(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the fixed monthly installment (EMI) for a loan.

    Uses EMI = P * r * (1+r)^n / ((1+r)^n - 1) with the monthly rate
    r = rate_per_annum / 12 and n = years_to_repay * 12 payments.

    Fix: the original declared three parameters with the same obfuscated
    name (a SyntaxError); names restored from the body's references and
    the error messages.

    Raises:
        Exception: if principal <= 0, rate < 0, or years is not a
            positive integer.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
    # Exercise the examples embedded in this module's docstrings.
    from doctest import testmod

    testmod()
| 367 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    """Yield successive ``size``-length tuples from ``seq``; the final chunk
    may be shorter.

    Renamed from the obfuscated ``__lowerCAmelCase``: encode/decode below
    call ``chunker(text, 2)``. Fix: the original declared both parameters
    with the same name (a SyntaxError).
    """
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    """Normalize plaintext for Playfair: keep ASCII letters uppercased,
    insert 'X' between doubled letters, and pad with 'X' to even length.

    Renamed from the obfuscated ``__lowerCAmelCase``: ``encode`` calls
    ``prepare_input``. Fix: the final parity check now uses ``len(clean)``
    — the inserted 'X' separators change the length, so testing the
    pre-cleaned string's length pads incorrectly.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key):
    """Build the 25-letter Playfair key table: key letters first (deduped),
    then the rest of the alphabet.

    Renamed from the obfuscated ``__lowerCAmelCase``: encode/decode call
    ``generate_table(key)``. Fix: the alphabet string and the table list
    were both assigned to the same obfuscated local, clobbering the
    alphabet.
    """
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext, key):
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``.

    Fix: the original declared both parameters with the same obfuscated
    name (a SyntaxError) and collapsed the digraph row/column locals;
    restored per the standard Playfair rules the branch structure encodes.
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext, key):
    """Decrypt Playfair ``ciphertext`` under ``key`` (inverse of ``encode``).

    Fix: the original declared both parameters with the same obfuscated
    name (a SyntaxError) and collapsed the digraph row/column locals;
    restored per the standard Playfair rules (shift left/up instead of
    right/down).
    """
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the left (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
| 367 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module logger (restored conventional name from the obfuscated `a__`).
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute pixel box [x0, y0, x1, y1] to the 0-1000 coordinate
    system used by layout models (x by image width, y by image height).

    Renamed from the obfuscated ``UpperCAmelCase_``: ``apply_tesseract``
    calls ``normalize_box(box, width, height)``. Fix: the original declared
    all three parameters with the same name (a SyntaxError).
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on a document image and return ``(words, boxes)``
    with boxes normalized to the 0-1000 coordinate system.

    Renamed from the obfuscated ``UpperCAmelCase_``: the image processor
    calls ``apply_tesseract(image, ocr_lang, tesseract_config)``. Fix: the
    original declared all three parameters with the same name (SyntaxError)
    and collapsed every local; names restored from the remaining references
    (``pil_image``, ``data[...]``, ``actual_boxes.append``, ...).
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates (set for O(1) lookups)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a__ ):
    """Document image processor: resizes, rescales and normalizes pixel values
    and (optionally) runs Tesseract OCR to collect words plus normalized
    bounding boxes alongside the pixel values.

    NOTE(review): this block is machine-obfuscated and not runnable as-is —
    kept byte-identical here, with the problems flagged only in comments:
    * every parameter of ``__init__``/``resize``/``rescale``/``normalize``/
      ``preprocess`` is named ``_lowerCamelCase``; duplicate parameter names
      are a SyntaxError in Python.
    * the ``SCREAMING_SNAKE_CASE`` assignment targets replaced the original
      local/attribute names, so e.g. ``self.do_resize`` is never actually
      set even though ``preprocess`` reads it. Restoring the exact intended
      names needs the upstream file — TODO confirm before fixing.
    """

    # presumably the model input names list — verify against the base class
    __SCREAMING_SNAKE_CASE : Tuple = ['pixel_values']

    # NOTE(review): duplicate parameter names below — SyntaxError as written.
    def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = "" , **_lowerCamelCase , ) ->None:
        super().__init__(**_lowerCamelCase )
        # NOTE(review): `size`, `do_resize`, ... are read below but never bound
        # (the assignment targets were obfuscated away).
        SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''height''': 224, '''width''': 224}
        SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Optional[int] = do_resize
        SCREAMING_SNAKE_CASE : List[str] = size
        SCREAMING_SNAKE_CASE : Dict = resample
        SCREAMING_SNAKE_CASE : Dict = do_rescale
        SCREAMING_SNAKE_CASE : Optional[int] = rescale_value
        SCREAMING_SNAKE_CASE : str = do_normalize
        SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
        SCREAMING_SNAKE_CASE : Tuple = apply_ocr
        SCREAMING_SNAKE_CASE : List[Any] = ocr_lang
        SCREAMING_SNAKE_CASE : Dict = tesseract_config

    # Resize an image to the {"height", "width"} given in `size`.
    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ) ->np.ndarray:
        SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_lowerCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        SCREAMING_SNAKE_CASE : Any = (size['''height'''], size['''width'''])
        return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Multiply pixel values by a scale factor.
    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) ->np.ndarray:
        return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Normalize pixel values with a mean/std pair.
    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) ->np.ndarray:
        return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Full preprocessing pipeline: validate inputs, optional OCR, then
    # resize -> rescale -> normalize -> channel-format -> BatchFeature.
    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ) ->PIL.Image.Image:
        SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else self.size
        SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE : Any = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE : Tuple = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
        SCREAMING_SNAKE_CASE : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
        SCREAMING_SNAKE_CASE : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
        SCREAMING_SNAKE_CASE : List[str] = make_list_of_images(_lowerCamelCase )
        if not valid_images(_lowerCamelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            SCREAMING_SNAKE_CASE : int = []
            SCREAMING_SNAKE_CASE : Dict = []
            for image in images:
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = apply_tesseract(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
                words_batch.append(_lowerCamelCase )
                boxes_batch.append(_lowerCamelCase )
        if do_resize:
            SCREAMING_SNAKE_CASE : List[str] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE : List[Any] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE : Any = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
        SCREAMING_SNAKE_CASE : List[str] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
        SCREAMING_SNAKE_CASE : Optional[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_lowerCamelCase )
        if apply_ocr:
            SCREAMING_SNAKE_CASE : Tuple = words_batch
            SCREAMING_SNAKE_CASE : List[Any] = boxes_batch
        return data
| 333 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds a tiny ConvNext-backed UperNet configuration and dummy inputs
    for the unit tests.

    Restored from obfuscated code: the class is named ``UperNetModelTester``
    because the test case's setUp instantiates ``UperNetModelTester(self)``,
    and the ``self.*`` attribute targets are taken from the names the other
    methods read (``self.batch_size``, ``self.hidden_sizes``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — mutable default kept from the original signature
        depths=[2, 2, 3, 2],  # noqa: B006
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],  # noqa: B006
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages  # kept: the original assigned num_stages twice

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        """Tiny ConvNext backbone config feeding the UperNet head."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        """Full UperNet config.

        NOTE: the obfuscation erased the boolean values for
        ``use_auxiliary_head``/``auxiliary_concat_input``; True/False below
        follow the upstream test — confirm against the original file.
        """
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Run one forward pass and check the logits shape."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape ModelTesterMixin expects."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
    """Unit-test suite for UperNetForSemanticSegmentation (config checks,
    forward-signature check, hidden-states shapes, initialization, and
    pretrained loading).

    NOTE(review): this block is machine-obfuscated and kept byte-identical;
    problems are only flagged in comments:
    * every test method is named ``__lowerCAmelCase`` — later definitions
      shadow earlier ones, and unittest will not discover any of them.
      The original test_* names are not recoverable from this file alone.
    * ``SCREAMING_SNAKE_CASE`` assignment targets replaced the intended
      locals (e.g. ``self.model_tester``/``self.config_tester`` in setUp),
      and ``_lowerCamelCase`` references are unbound.
    """

    __SCREAMING_SNAKE_CASE : Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE : Union[str, Any] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    __SCREAMING_SNAKE_CASE : Union[str, Any] = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False
    __SCREAMING_SNAKE_CASE : List[Any] = False
    __SCREAMING_SNAKE_CASE : Union[str, Any] = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False

    # setUp: should bind self.model_tester / self.config_tester (targets lost).
    def __lowerCAmelCase ( self ) ->str:
        SCREAMING_SNAKE_CASE : int = UperNetModelTester(self )
        SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )

    # Config round-trip checks delegated to ConfigTester.
    def __lowerCAmelCase ( self ) ->Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __lowerCAmelCase ( self ) ->Optional[int]:
        return

    # Forward-signature check: the first forward arg should be pixel_values.
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
            SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE : List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _lowerCamelCase )

    # Semantic-segmentation forward check via the model tester.
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )

    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def __lowerCAmelCase ( self ) ->Optional[Any]:
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def __lowerCAmelCase ( self ) ->str:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def __lowerCAmelCase ( self ) ->Optional[Any]:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def __lowerCAmelCase ( self ) ->Optional[int]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def __lowerCAmelCase ( self ) ->Dict:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __lowerCAmelCase ( self ) ->List[str]:
        pass

    # Hidden-states shape check; NOTE(review): the nested helper declares
    # three parameters with the same name — a SyntaxError as written.
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
            SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
            model.to(_lowerCamelCase )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
            SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Dict = True
            check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE : List[str] = True
            check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )

    # Initialization check: all zero-init'd params should round to 0.0 or 1.0.
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Any = _config_zero_init(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Optional[int] = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : List[str] = model_class(config=_lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def __lowerCAmelCase ( self ) ->Dict:
        pass

    # Smoke-test loading the first pretrained checkpoint.
    @slow
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    """Download and return the ADE20k fixture image used by the integration
    tests, converted to RGB.

    Renamed from the obfuscated ``UpperCAmelCase_``: the integration tests
    call ``prepare_img()``.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
    """Slow integration tests: run pretrained UperNet checkpoints
    (openmmlab/upernet-swin-tiny and openmmlab/upernet-convnext-tiny) on a
    fixture image and compare a 3x3 logits slice against recorded values.

    NOTE(review): machine-obfuscated and kept byte-identical — the
    ``SCREAMING_SNAKE_CASE`` targets replaced the intended locals
    (processor, model, image, inputs, outputs, expected shape/slice) and the
    ``_lowerCamelCase`` references (presumably ``torch_device``/``image``)
    are unbound, so these tests cannot run as written.
    """

    def __lowerCAmelCase ( self ) ->List[str]:
        SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        SCREAMING_SNAKE_CASE : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : str = prepare_img()
        SCREAMING_SNAKE_CASE : int = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Any = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _lowerCamelCase )
        SCREAMING_SNAKE_CASE : Dict = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )

    def __lowerCAmelCase ( self ) ->List[Any]:
        SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Tuple = prepare_img()
        SCREAMING_SNAKE_CASE : Any = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _lowerCamelCase )
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 333 | 1 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count representations of ``needed_sum`` as sums of distinct natural
    numbers each raised to ``power``, by depth-first search over
    include/exclude decisions for ``current_number``.

    Fix: the original declared all five parameters with the same obfuscated
    name (a SyntaxError); names restored from the body's references
    (``current_sum``, ``needed_sum``, ``i_to_n``, ``solutions_count``, and
    the recursive ``backtrack`` calls).

    Returns:
        ``(current_sum, solutions_count)`` — the running sum (unchanged on
        full unwind) and the number of solutions found so far.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def __UpperCamelCase(needed_sum: int, power: int) -> int:
    """Return how many ways ``needed_sum`` can be expressed as a sum of
    distinct natural numbers each raised to ``power``.

    Fix: the original declared both parameters with the same obfuscated
    name (a SyntaxError); names restored from the validation message.

    Raises:
        ValueError: if ``needed_sum`` is not in [1, 1000] or ``power`` is
            not in [2, 10].
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
    # Exercise the examples embedded in this module's docstrings.
    from doctest import testmod

    testmod()
| 333 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

# Restored constant names: the tokenizer class below references
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and PRETRAINED_INIT_CONFIGURATION,
# but the original assigned every value to the same obfuscated name.

# File names under which the tokenizer's assets are saved.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Remote vocabulary / tokenizer files for each canonical checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positions) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

# Default init kwargs (casing behavior) per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class __magic_name__ ( _a):
    r"""
    "Fast" DistilBERT tokenizer, backed by the HuggingFace *tokenizers* library.

    Runs end-to-end tokenization (punctuation splitting + WordPiece) and keeps
    the backend normalizer in sync with the ``do_lower_case`` /
    ``strip_accents`` / ``tokenize_chinese_chars`` options requested at
    construction time.
    """

    # NOTE(review): the original assigned all six class attributes to the same
    # mangled name so each shadowed the previous one; the names below are the
    # ones the PreTrainedTokenizerFast base class actually reads.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-configure the backend normalizer when its serialized state
        # disagrees with the options the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # ``normalizers`` is the ``tokenizers.normalizers`` module; it must
            # be imported at the top of this file -- TODO confirm the import.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: all zeros for a single sequence; zeros for
        the first segment and ones for the second when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 333 | 1 |
"""simple docstring"""
import math
def _snake_case ( UpperCamelCase : int ) -> bool:
    """Return ``True`` if *UpperCamelCase* is a prime number.

    Uses trial division over 6k +/- 1 candidates up to sqrt(n).
    """
    if 1 < UpperCamelCase < 4:
        # 2 and 3 are primes
        return True
    elif UpperCamelCase < 2 or UpperCamelCase % 2 == 0 or UpperCamelCase % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(UpperCamelCase) + 1), 6):
        if UpperCamelCase % i == 0 or UpperCamelCase % (i + 2) == 0:
            return False
    return True


# Public alias: the spiral-ratio function below invokes this helper as
# ``is_prime``.
is_prime = _snake_case
def _snake_case ( UpperCamelCase : Tuple = 0.1 ):
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : Dict = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__UpperCamelCase )
j += 2
return j
# Run the doctests embedded in this module when it is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 708 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _snake_case ( UpperCamelCase : str = "AAPL" ) -> str:
    """Scrape the current share price for ticker *UpperCamelCase* from Yahoo
    Finance India.

    Performs a live HTTP request and returns the price text from the quote
    page. The original body referenced an undefined name ``symbol`` instead of
    the parameter, which made every call raise ``NameError``.
    """
    url = f"https://in.finance.yahoo.com/quote/{UpperCamelCase}?s={UpperCamelCase}"
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    # NOTE(review): this CSS class is tied to Yahoo's generated markup and may
    # go stale whenever the site is redesigned.
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""", class_=class_).find("""span""").text
if __name__ == "__main__":
    # Print a live quote for a handful of large-cap tickers.
    # The original called a nonexistent ``stock_price``; the scraper above is
    # (mangled-)named ``_snake_case``.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {_snake_case(symbol):>8}""")
| 359 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite tests for the TF MobileBERT model family.

    NOTE(review): the original declared the bases as the undefined name
    ``__a``; the two mixins imported at the top of this chunk are restored
    here. The class name is the one referenced later in this file.
    """

    # Model classes exercised by the shared TF model tests.
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Augment *inputs_dict* with dummy labels when the common tests ask
        for them; pre-training models additionally expect a next-sentence
        label."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # The original had the garbled dtype ``tf.intaa``.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class TFMobileBertModelTester ( __a ):
    """Builds tiny MobileBERT configs/inputs and checks every TF MobileBERT head.

    NOTE(review): upstream keeps this tester *nested* inside the test-case
    class, while this file flattens the tester helpers and the unittest
    methods into a single top-level class; the base ``__a`` is not defined in
    this file -- confirm against the original module. All method names below
    are restored from the call sites further down (the originals were all
    named identically and shadowed one another).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Every size is deliberately tiny so each check builds and runs fast.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size

    def prepare_config_and_inputs(self):
        """Return a tiny MobileBertConfig plus random ids, masks and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        # The model must accept dict, list and bare-tensor inputs alike.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        # Tile every input along a new "choice" axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

    def setUp(self):
        # This flattened class serves as its own tester (see class docstring).
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest ( unittest.TestCase ):
    """Slow integration test against the released MobileBERT checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        """Compare a slice of the pre-training head output against values
        recorded from the reference checkpoint."""
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 391 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
# NOTE(review): mangled module constant; in the upstream conversion script
# this is ``json_indent = 2``, the indent level for the generated JSON files.
_lowerCAmelCase = 2
class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integer ids.

    Minimal re-implementation used by the BioGPT checkpoint converter below
    (which loads it as ``Dictionary.load``). Special tokens are registered
    first; unknown indices fall back to the unk word.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # id -> symbol
        self.count = []  # id -> occurrence count
        self.indices = {}  # symbol -> id
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a ``<symbol> <count>`` text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add *word* to the dictionary (or bump its count) and return its id."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # No metadata header in this format: symbol entries start at line 0.
        return 0

    def add_from_file(self, f):
        """Load entries from *f* (a path or an open file handle)."""
        if isinstance(f, str):
            try:
                with open(f, '''r''', encoding='''utf-8''') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(''' ''', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(''' ''', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''')


# Backward-compatible alias for the original (mangled) class name.
_SCREAMING_SNAKE_CASE = Dictionary
def rewrite_dict_keys(d: dict) -> dict:
    '''Rewrite fairseq BPE vocab keys for HF consumption.

    (1) remove the word-breaking symbol, (2) add a word-ending symbol where
    the word is not broken up,
    e.g.: ``d = {'le@@': 5, 'tt@@': 6, 'er': 7}`` => ``{'le': 5, 'tt': 6, 'er</w>': 7}``
    '''
    d2 = dict(
        (re.sub(R'''@@$''', '''''', k), v) if k.endswith('''@@''') else (re.sub(R'''$''', '''</w>''', k), v)
        for k, v in d.items()
    )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens, which must keep their exact spelling
    for k in keep_keys:
        del d2[F'''{k}</w>''']
        d2[k] = d[k]  # restore
    return d2


# Backward-compatible alias for the original (mangled) public name; the
# converter below calls this helper as ``rewrite_dict_keys``.
UpperCamelCase = rewrite_dict_keys
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    '''Convert an official BioGPT (fairseq) checkpoint into HF format.

    Writes vocab/merges files, model and tokenizer configs, and the renamed
    state dict into *pytorch_dump_folder_path*. Raises ``ValueError`` when any
    expected source file is missing.
    '''
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F'''Writing results to {pytorch_dump_folder_path}''')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, '''checkpoint.pt''')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F'''path to the file {checkpoint_file} does not exist!''')
    chkpt = torch.load(checkpoint_file, map_location='''cpu''')
    args = chkpt['''cfg''']['''model''']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, '''dict.txt''')
    if not os.path.isfile(dict_file):
        raise ValueError(F'''path to the file {dict_file} does not exist!''')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['''vocab_file'''])
    print(F'''Generating {src_vocab_file} of {src_vocab_size} records''')
    with open(src_vocab_file, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, '''bpecodes''')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F'''path to the file {bpecodes_file} does not exist!''')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['''merges_file'''])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, '''config.json''')
    model_conf = {
        '''activation_dropout''': args['''activation_dropout'''],
        '''architectures''': ['''BioGptForCausalLM'''],
        '''attention_probs_dropout_prob''': args['''attention_dropout'''],
        '''bos_token_id''': 0,
        '''eos_token_id''': 2,
        '''hidden_act''': args['''activation_fn'''],
        '''hidden_dropout_prob''': args['''dropout'''],
        '''hidden_size''': args['''decoder_embed_dim'''],
        '''initializer_range''': 0.02,
        '''intermediate_size''': args['''decoder_ffn_embed_dim'''],
        '''layer_norm_eps''': 1e-12,
        '''layerdrop''': args['''decoder_layerdrop'''],
        '''max_position_embeddings''': args['''max_target_positions'''],
        '''model_type''': '''biogpt''',
        '''num_attention_heads''': args['''decoder_attention_heads'''],
        '''num_hidden_layers''': args['''decoder_layers'''],
        '''pad_token_id''': 1,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
        '''vocab_size''': src_vocab_size,
    }
    # good hparam defaults to start with
    print(F'''Generating {biogpt_model_config_file}''')
    with open(biogpt_model_config_file, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 1024,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }
    print(F'''Generating {biogpt_tokenizer_config_file}''')
    with open(biogpt_tokenizer_config_file, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt['''model''']

    # remove unneeded keys
    ignore_keys = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq keys to their HF equivalents
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight'''):
            model_state_dict['''output_projection.weight'''] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('''decoder''', '''biogpt''')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('''Conversion is done!''')


# Backward-compatible alias for the original (mangled) public name.
UpperCamelCase = convert_biogpt_checkpoint_to_pytorch
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and run the conversion.
    # The original assigned the parser and the parsed args to a throwaway
    # mangled name while the following lines read ``parser``/``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 432 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def lowercase ( _SCREAMING_SNAKE_CASE ):
    '''Decorator factory: append *_SCREAMING_SNAKE_CASE* (one key code) to the
    decorated function's ``handle_key`` list so the KeyHandler metaclass can
    dispatch that key press to it.'''

    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += [_SCREAMING_SNAKE_CASE]
        setattr(func, '''handle_key''', handle)
        return func

    return decorator
def lowercase ( *_SCREAMING_SNAKE_CASE ):
    '''Decorator factory: append every key code in *_SCREAMING_SNAKE_CASE* to
    the decorated function's ``handle_key`` list.'''

    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += _SCREAMING_SNAKE_CASE
        setattr(func, '''handle_key''', handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects methods marked with a ``handle_key`` attribute
    into a per-class ``key_handler`` dict and attaches ``handle_input`` for
    dispatching key presses.

    The class and method names are restored from the self-references inside
    the original body (``KeyHandler.handle_input`` / ``KeyHandler(...)``); the
    base must be ``type`` given the ``(cls, name, bases, attrs)`` signature.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, '''key_handler'''):
            setattr(new_cls, '''key_handler''', {})
        setattr(new_cls, '''handle_input''', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, '''handle_key''', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # Remember which key was hit before invoking its handler.
            cls.current_selection = char
            return handler(cls)
        else:
            return None


# Backward-compatible alias for the original (mangled) class name.
_a = KeyHandler
def lowercase ( cls : type ):
    '''Recreate *cls* through the ``KeyHandler`` metaclass so that any methods
    marked with ``handle_key`` are registered for input dispatch.

    NOTE(review): ``KeyHandler`` is expected to be the metaclass defined just
    above (named ``_a`` in this file's mangled form) -- confirm the binding.
    '''
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 719 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Scripts under examples/by_feature that the difference tests must skip
# (they have no "complete" counterpart to diff against).
__A : list = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
# The example-difference tests below read this list under the following name;
# expose it so that reference resolves.
EXCLUDE_EXAMPLES = __A
class ExampleDifferenceTests(unittest.TestCase):
    """Diffs each `examples/by_feature` script against the matching "complete"
    example and asserts the shared sections are identical.

    Renamed from the mangled ``_a``, which was shadowed by the next test class
    and therefore never collected.
    """

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        """Diff *complete_file_name* against every (non-excluded) by_feature
        script; *special_strings* are stripped from the diff before asserting
        it is empty."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''', '''by_feature'''))
        examples_path = os.path.abspath('''examples''')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section='''main()''' if parser_only else '''training_function()''',
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, special_strings
                        )
                        diff = '''\n'''.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '''''')
                        self.assertEqual(diff, '''''')

    def test_nlp_examples(self):
        self.one_complete_example('''complete_nlp_example.py''', True)
        self.one_complete_example('''complete_nlp_example.py''', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('''examples''', '''cv_example.py'''))
        # Formatting fragments that legitimately differ between the scripts.
        special_strings = [
            ''' ''' * 1_6 + '''{\n\n''',
            ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
            ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
            ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
            ''' ''' * 1_6 + '''},\n\n''',
            ''' ''' * 1_6 + '''step=epoch,\n''',
            ''' ''' * 1_2,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''', True, cv_path, special_strings)
        self.one_complete_example('''complete_cv_example.py''', False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class FeatureExamplesTests(TempDirTestCase):
    """Runs each ``examples/by_feature`` script end-to-end via
    ``accelerate launch``.

    The base class is restored to ``TempDirTestCase`` (imported above, and the
    provider of ``self.tmpdir``); the original declared the undefined name
    ``lowerCAmelCase``. Test-method names are restored so the runner can
    discover them -- the originals were all named identically and shadowed
    one another.
    """

    # Keep artifacts between tests so the resume-from-checkpoint tests can
    # reuse what the checkpointing tests produced.
    # NOTE(review): upstream calls this TempDirTestCase flag ``clear_on_exit``
    # -- confirm the attribute name.
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, '''default_config.yml''')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''epoch_0''')))

    def test_checkpointing_by_steps(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''step_2''')))

    def test_load_states_by_epoch(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        # Resuming from epoch_0 must skip epoch 0 and continue with epoch 1.
        self.assertNotIn('''epoch 0:''', output)
        self.assertIn('''epoch 1:''', output)

    def test_load_states_by_steps(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        # With several processes the per-step checkpoint lands past epoch 0.
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)
        else:
            self.assertIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)

    @slow
    def test_cross_validation(self):
        testargs = '''
 examples/by_feature/cross_validation.py
 --num_folds 2
 '''.split()
        with mock.patch.dict(os.environ, {'''TESTING_MOCKED_DATALOADERS''': '''0'''}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('''({.+})''', output)
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['''accuracy'''], 0.7_5)

    def test_multi_process_metrics(self):
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, '''tracking''')))

    def test_gradient_accumulation(self):
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs)
| 95 | 0 |
from string import ascii_lowercase, ascii_uppercase
def _lowercase( __a : str ) -> str:
    """Return ``__a`` with its first character upper-cased, rest unchanged.

    Non-letter first characters (digits, punctuation) are returned as-is.
    """
    sentence = __a
    if not sentence:
        return ""
    # Map every lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    # ``get`` falls back to the original character when it is not a lowercase letter.
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 20 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A node of a segment tree covering the inclusive index range ``[start, end]``."""

    def __init__(self, start, end, val, left=None, right=None) -> None:
        """Store the covered range, its aggregated value and optional children."""
        self.start = start
        self.end = end
        self.val = val
        # Midpoint used to decide whether a query/update descends left or right.
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self) -> str:
        """Debug representation showing the covered range and aggregate value."""
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    """Node-based segment tree aggregating a collection with an arbitrary
    associative function (e.g. ``operator.add``, ``max``, ``min``)."""

    def __init__(self, collection, function) -> None:
        """Build the tree over ``collection`` using ``function`` as the aggregator."""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val) -> None:
        """Set ``collection[i]`` to ``val`` and refresh the affected aggregates."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the aggregate of the inclusive index range ``[i, j]``."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        """Recursively build the subtree covering ``[start, end]``."""
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val) -> None:
        """Descend to the leaf for index ``i``, set it, and re-aggregate on the way up."""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        """Aggregate ``[i, j]`` inside ``node``'s subtree."""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range straddles both children: split at the midpoint
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demonstrate the tree with three different aggregation functions.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 590 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def __lowerCamelCase ( x , mu = 0.0 , sigma = 1.0 ):
    """Return the Gaussian (normal) probability density at ``x``.

    Args:
        x: Point at which to evaluate the density.
        mu: Mean of the distribution (default 0.0).
        sigma: Standard deviation of the distribution (default 1.0).
    """
    # 1/sqrt(2*pi*sigma^2) * exp(-(x - mu)^2 / (2*sigma^2))
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 704 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(TestCasePlus ):
    """Slow integration test: fine-tune a tiny BERT2BERT ``EncoderDecoderModel`` on a
    1% slice of CNN/DailyMail with ``SeqaSeqTrainer`` and generation-based eval."""

    @slow
    @require_torch
    def snake_case ( self : Any ):
        """End-to-end seq2seq fine-tuning run; only checks that training completes."""
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )

        # Wire generation-related settings from the tokenizer onto the model config.
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )

        # Keep the run fast: 32 train / 16 eval examples.
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask padding positions so the loss ignores them.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            # Exact-match accuracy between generated and reference summaries.
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )

        # start training
        trainer.train()
| 81 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Test fixture locations used by the tests below.
# NOTE(review): all three assignments bind the same name ``__A`` — each one
# shadows the previous, so only the last path survives. This looks like an
# obfuscation artifact (originally three distinct constants) — confirm.
__A =get_tests_dir('fixtures')
__A =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__A =get_tests_dir('fixtures/dummy-config.json')
class _snake_case ( unittest.TestCase ):
    """Tests for ``AutoFeatureExtractor``: loading, local save/reload, error paths,
    custom registration and ``trust_remote_code`` handling.

    NOTE(review): many arguments below are the obfuscated name ``_lowerCamelCase``,
    which is not defined at module scope here — upstream these were distinct values
    (fixture paths, expected classes, booleans). Comments describe apparent intent;
    confirm against the original test file before relying on them.
    """

    def snake_case__ ( self):
        """Placeholder setup hook (binds a dummy counter)."""
        UpperCAmelCase__ : str = 0

    def snake_case__ ( self):
        """Load a feature extractor from a Hub repo id and check its type."""
        UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""")
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase)

    def snake_case__ ( self):
        """Load a feature extractor from a local config path."""
        UpperCAmelCase__ : str = AutoFeatureExtractor.from_pretrained(_lowerCamelCase)
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase)

    def snake_case__ ( self):
        """A model ``config.json`` alone should be enough to load the processor locally."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase__ : Union[str, Any] = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            UpperCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_lowerCamelCase).to_dict()
            config_dict.pop("""feature_extractor_type""")
            UpperCAmelCase__ : Optional[int] = WavaVecaFeatureExtractor(**_lowerCamelCase)
            # save in new folder
            model_config.save_pretrained(_lowerCamelCase)
            config.save_pretrained(_lowerCamelCase)
            UpperCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(_lowerCamelCase)
            # make sure private variable is not incorrectly saved
            UpperCAmelCase__ : Optional[int] = json.loads(config.to_json_string())
            self.assertTrue("""_processor_class""" not in dict_as_saved)
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase)

    def snake_case__ ( self):
        """Load directly from a preprocessor config file."""
        UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(_lowerCamelCase)
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase)

    def snake_case__ ( self):
        """An invalid model identifier raises a descriptive error."""
        with self.assertRaisesRegex(
            _lowerCamelCase , """bert-base is not a local folder and is not a valid model identifier"""):
            UpperCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained("""bert-base""")

    def snake_case__ ( self):
        """An invalid revision raises a descriptive error."""
        with self.assertRaisesRegex(
            _lowerCamelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"""):
            UpperCAmelCase__ : int = AutoFeatureExtractor.from_pretrained(_lowerCamelCase , revision="""aaaaaa""")

    def snake_case__ ( self):
        """A repo without a preprocessor config raises a descriptive error."""
        with self.assertRaisesRegex(
            _lowerCamelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            UpperCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""")

    def snake_case__ ( self):
        """Remote-code extractors require ``trust_remote_code`` and can be reloaded."""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_lowerCamelCase):
            UpperCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_lowerCamelCase):
            UpperCAmelCase__ : int = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCamelCase)
        UpperCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCamelCase)
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(_lowerCamelCase)
            UpperCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""")

    def snake_case__ ( self):
        """Custom configs/extractors can be registered, used and round-tripped."""
        try:
            AutoConfig.register("""custom""" , _lowerCamelCase)
            AutoFeatureExtractor.register(_lowerCamelCase , _lowerCamelCase)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_lowerCamelCase):
                AutoFeatureExtractor.register(_lowerCamelCase , _lowerCamelCase)
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCAmelCase__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(_lowerCamelCase)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(_lowerCamelCase)
                UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(_lowerCamelCase)
                self.assertIsInstance(_lowerCamelCase , _lowerCamelCase)
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def snake_case__ ( self):
        """A locally registered extractor takes precedence unless remote code is forced."""
        class _snake_case ( a__ ):
            # Marker attribute distinguishing the local implementation.
            lowerCAmelCase :Dict = True
        try:
            AutoConfig.register("""custom""" , _lowerCamelCase)
            AutoFeatureExtractor.register(_lowerCamelCase , _lowerCamelCase)
            # If remote code is not set, the default is to use local
            UpperCAmelCase__ : int = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""")
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCamelCase)
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            UpperCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowerCamelCase)
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""")
            self.assertTrue(not hasattr(_lowerCamelCase , """is_local"""))
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class _snake_case ( AbstractDatasetInputStream ):
    """Reads a ``datasets.Dataset`` from a SQL query or table (SqlDatasetReader)."""

    def __init__( self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs , ):
        """Create the reader.

        Args:
            sql: SQL query string or table name to read.
            con: Database connection URI or SQLAlchemy connectable.
            features: Optional ``Features`` to cast the dataset to.
            cache_dir: Optional cache directory for the builder.
            keep_in_memory: Whether to keep the dataset in RAM instead of on disk.
            **kwargs: Extra keyword arguments forwarded to the ``Sql`` builder.
        """
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )

    def snake_case__ ( self):
        """Download/prepare via the Sql builder and return the resulting 'train' split."""
        # No download configuration is needed for SQL sources.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="""train""" , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
class _snake_case :
    """Writes a ``datasets.Dataset`` to a SQL table, optionally in parallel
    (SqlDatasetWriter)."""

    def __init__( self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs , ):
        """Create the writer.

        Args:
            dataset: The ``datasets.Dataset`` to write.
            name: Target SQL table name.
            con: Database connection URI or SQLAlchemy connectable.
            batch_size: Rows per write batch (defaults to ``config.DEFAULT_MAX_BATCH_SIZE``).
            num_proc: Optional number of worker processes (> 0).
            **to_sql_kwargs: Extra keyword arguments forwarded to ``pandas.DataFrame.to_sql``.

        Raises:
            ValueError: If ``num_proc`` is given but not a positive integer.
        """
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def snake_case__ ( self):
        """Write the whole dataset and return the number of rows written."""
        # ``sql``/``con`` must not leak into ``DataFrame.to_sql``; drop them if present.
        _ = self.to_sql_kwargs.pop("""sql""" , None)
        _ = self.to_sql_kwargs.pop("""con""" , None)
        index = self.to_sql_kwargs.pop("""index""" , False)
        written = self._write(index=index , **self.to_sql_kwargs)
        return written

    def _batch_sql( self , args):
        """Write one batch (worker-friendly: takes a single args tuple)."""
        offset , index , to_sql_kwargs = args
        # Only the first batch may create/replace the table; later ones append.
        to_sql_kwargs = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs)
        # ``to_sql`` may return None depending on the backend; fall back to the frame length.
        return num_rows or len(df)

    def _write( self , index , **to_sql_kwargs):
        """Iterate the dataset in batches (serially or with a process pool)."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows , batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                    written += num_rows_written
        return written
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __a( ABC ):
    """Abstract base class for CLI subcommands.

    Subclasses register their arguments on an ``ArgumentParser`` and implement
    the command body. Note both abstract methods share the obfuscated name
    ``a__``; the second definition shadows the first in the class namespace.
    """

    @staticmethod
    @abstractmethod
    def a__ ( _SCREAMING_SNAKE_CASE ) -> str:
        """Register this command's arguments on the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def a__ ( self ) -> str:
        """Execute the command."""
        raise NotImplementedError()
| 713 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
) | 300 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__lowerCamelCase : Dict = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder ):
    """``BertEncoder`` variant that can run a single transformer layer at a time,
    enabling PABEE's layer-by-layer early exit."""

    def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        """Apply only the layer at index ``current_layer`` and return its hidden states.

        NOTE(review): ``head_mask`` is indexed per layer, so callers must pass the
        per-layer mask list produced by ``get_head_mask`` — confirm.
        """
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __snake_case , )
class A__ ( __snake_case ):
    """BERT model with Patience-based Early Exit (PABEE).

    NOTE(review): the base ``__snake_case`` and the ``BertEncoderWithPabee``
    reference below come from obfuscated names; upstream this subclasses
    ``BertModel``. Repeated assignments to ``UpperCamelCase`` and the ``A_``
    argument placeholders are obfuscation artifacts — confirm against the
    original ``bert-loses-patience`` example before relying on them.
    """

    def __init__( self , A_ ):
        '''Build the PABEE-enabled model and reset the early-exit statistics.'''
        super().__init__(A_ )
        UpperCamelCase : Tuple = BertEncoderWithPabee(A_ )
        self.init_weights()
        # Counters for reporting average exit layer / speed-up.
        UpperCamelCase : List[str] = 0
        UpperCamelCase : Optional[Any] = 0
        UpperCamelCase : List[str] = 0
        UpperCamelCase : List[Any] = 0

    def __UpperCamelCase( self , A_ ):
        '''Set the regression early-exit threshold.'''
        UpperCamelCase : Any = threshold

    def __UpperCamelCase( self , A_ ):
        '''Set the patience (consecutive agreeing layers before exiting).'''
        UpperCamelCase : int = patience

    def __UpperCamelCase( self ):
        '''Reset the inference statistics counters.'''
        UpperCamelCase : Union[str, Any] = 0
        UpperCamelCase : List[Any] = 0

    def __UpperCamelCase( self ):
        '''Print the average number of layers executed and the implied speed-up.'''
        UpperCamelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
        UpperCamelCase : List[Any] = (
            F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(A_ )

    @add_start_docstrings_to_model_forward(A_ )
    def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , ):
        '''Forward pass with optional patience-based early exit at inference time.'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCamelCase : Optional[Any] = input_ids.size()
        elif inputs_embeds is not None:
            UpperCamelCase : str = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCamelCase : Tuple = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCamelCase : Optional[int] = torch.ones(A_ , device=A_ )
        if token_type_ids is None:
            UpperCamelCase : int = torch.zeros(A_ , dtype=torch.long , device=A_ )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(A_ , A_ , A_ )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = encoder_hidden_states.size()
            UpperCamelCase : Any = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCamelCase : List[Any] = torch.ones(A_ , device=A_ )
            UpperCamelCase : Optional[int] = self.invert_attention_mask(A_ )
        else:
            UpperCamelCase : Union[str, Any] = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCamelCase : Optional[int] = self.get_head_mask(A_ , self.config.num_hidden_layers )
        UpperCamelCase : Union[str, Any] = self.embeddings(
            input_ids=A_ , position_ids=A_ , token_type_ids=A_ , inputs_embeds=A_ )
        UpperCamelCase : List[str] = embedding_output
        if self.training:
            # Training: run every layer and collect one prediction head output per layer.
            UpperCamelCase : Dict = []
            for i in range(self.config.num_hidden_layers ):
                UpperCamelCase : Any = self.encoder.adaptive_forward(
                    A_ , current_layer=A_ , attention_mask=A_ , head_mask=A_ )
                UpperCamelCase : List[str] = self.pooler(A_ )
                UpperCamelCase : Union[str, Any] = output_layers[i](output_dropout(A_ ) )
                res.append(A_ )
        elif self.patience == 0: # Use all layers for inference
            UpperCamelCase : Any = self.encoder(
                A_ , attention_mask=A_ , head_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
            UpperCamelCase : Tuple = self.pooler(encoder_outputs[0] )
            UpperCamelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](A_ )]
        else:
            # Early-exit inference: stop once ``patience`` consecutive layers agree.
            UpperCamelCase : Any = 0
            UpperCamelCase : Any = None
            UpperCamelCase : List[str] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCamelCase : Dict = self.encoder.adaptive_forward(
                    A_ , current_layer=A_ , attention_mask=A_ , head_mask=A_ )
                UpperCamelCase : Dict = self.pooler(A_ )
                UpperCamelCase : Dict = output_layers[i](A_ )
                if regression:
                    # Regression: "agreement" means predictions within the threshold.
                    UpperCamelCase : str = logits.detach()
                    if patient_result is not None:
                        UpperCamelCase : List[str] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCamelCase : Dict = 0
                else:
                    # Classification: "agreement" means identical argmax labels.
                    UpperCamelCase : Dict = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCamelCase : int = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(A_ ) ):
                        patient_counter += 1
                    else:
                        UpperCamelCase : List[Any] = 0
                    UpperCamelCase : Optional[Any] = logits
                if patient_counter == self.patience:
                    break
            UpperCamelCase : Union[str, Any] = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __snake_case , )
class A__ ( __snake_case ):
    """PABEE sequence classification/regression head: one linear classifier per
    transformer layer, so the model can exit at any layer.

    NOTE(review): base ``__snake_case`` and the ``A_`` placeholders are
    obfuscation artifacts; upstream this subclasses ``BertPreTrainedModel`` —
    confirm against the original example.
    """

    def __init__( self , A_ ):
        '''Build the backbone and one classification head per hidden layer.'''
        super().__init__(A_ )
        UpperCamelCase : Dict = config.num_labels
        UpperCamelCase : int = BertModelWithPabee(A_ )
        UpperCamelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
        # One linear head per layer so every exit point can produce logits.
        UpperCamelCase : Union[str, Any] = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(A_ )
    def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , ):
        '''Forward pass; with labels, computes a depth-weighted sum of per-layer losses.'''
        UpperCamelCase : Dict = self.bert(
            input_ids=A_ , attention_mask=A_ , token_type_ids=A_ , position_ids=A_ , head_mask=A_ , inputs_embeds=A_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCamelCase : Any = (logits[-1],)
        if labels is not None:
            UpperCamelCase : Any = None
            UpperCamelCase : Optional[int] = 0
            for ix, logits_item in enumerate(A_ ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCamelCase : Tuple = MSELoss()
                    UpperCamelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCamelCase : int = CrossEntropyLoss()
                    UpperCamelCase : List[str] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCamelCase : Optional[int] = loss
                else:
                    # Deeper layers get proportionally larger weights (ix + 1).
                    total_loss += loss * (ix + 1)
                    total_weights += ix + 1
            UpperCamelCase : Union[str, Any] = (total_loss / total_weights,) + outputs
        return outputs
| 629 |
import numpy as np
import qiskit
def A_ ( key_len = 8 , seed = None ) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the key.

    Args:
        key_len: Desired key length; the result is truncated or zero-padded to it.
        seed: Optional seed for both the NumPy RNG and the quantum simulator.

    Returns:
        A string of '0'/'1' characters of length ``key_len``.
    """
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits , name="BB84" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bb84_circ.x(index )
        if alice_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , "0" )
    return key
if __name__ == "__main__":
    # Demonstrate key generation with a fixed seed for reproducibility.
    print(f"""The generated key is : {A_(8, seed=0)}""")
    from doctest import testmod

    testmod()
| 629 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time, pointing users of the old module
# path to the new top-level `diffusers` import (scheduled removal: 0.22.0).
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 703 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array, index1, index2, direction) -> None:
    """Compare ``array[index1]`` and ``array[index2]`` and swap them in place if
    they violate ``direction`` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array, low, length, direction) -> None:
    """Recursively merge the bitonic sequence ``array[low:low+length]`` in place
    into the order given by ``direction`` (1 = ascending, 0 = descending).
    ``length`` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        # Compare/swap each element with its partner one half-length away.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array, low, length, direction) -> None:
    """Sort ``array[low:low+length]`` in place into ``direction``
    (1 = ascending, 0 = descending). ``length`` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        # Build a bitonic sequence: first half ascending, second half descending.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    # Read a comma-separated list of integers from the user.
    # Note: bitonic sort requires the element count to be a power of two.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
| 223 | 0 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Regex-style matching supporting '.' (any single character) and '*'
    (zero or more of the preceding element), via bottom-up dynamic
    programming.

    dp[i][j] is 1 iff the first i characters of `input_string` match the
    first j characters of `pattern`.

    The original defined this as `a__` while the driver below calls
    `match_pattern`; the name is restored from that call site.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "aa")
    False
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    dp = [[0] * len_pattern for _ in range(len_string)]

    # An empty string matches an empty pattern.
    dp[0][0] = 1

    # A non-empty string never matches an empty pattern.
    for i in range(1, len_string):
        dp[i][0] = 0

    # The empty string can still match patterns like "a*" or "a*b*",
    # where '*' erases the element before it.
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # Bottom-up fill for all remaining prefix lengths.
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # Zero occurrences of the starred element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # One more occurrence of the starred element.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowercase__ : str = 'aab'
lowercase__ : List[str] = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 98 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True iff `number` is prime, by 6k +/- 1 trial division.

    The original defined this as `lowerCAmelCase__` while the unit tests
    below call `is_prime`; the name is restored from those call sites.

    Raises:
        AssertionError: if `number` is not a non-negative int.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 have the form 6k +/- 1, so only those candidates are tried.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class TestIsPrime(unittest.TestCase):
    """Unit tests for is_prime.

    The original gave BOTH test methods the same mangled name (the second
    silently shadowed the first) and neither was `test_`-prefixed, so
    unittest discovered nothing; both defects are fixed here. The
    `assertRaises` target was also mangled — the validating `assert` in
    is_prime raises AssertionError.
    """

    def test_primes(self) -> None:
        # Known small primes must be accepted.
        for n in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            self.assertTrue(is_prime(n))

    def test_not_primes(self) -> None:
        # Negative input violates the precondition assert.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV2-specific attributes.

    NOTE(review): the class, base and method names were machine-mangled. The
    class name is reconstructed from the `MobileNetVaConfigTester(...)`
    reference in the test class's setUp further down; the base is presumably
    `ConfigTester` (imported above) — TODO confirm the method name upstream.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # MobileNetV2 configs must expose these architecture knobs.
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds configs/inputs and runs shape checks for MobileNetV2 models.

    NOTE(review): identifiers were machine-mangled. The class name and every
    method name are reconstructed from their call sites in the test class
    below (e.g. `self.model_tester.create_and_check_model(...)`); parameter
    names are reconstructed from the attribute assignments in __init__.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # The head keeps the full width only for fine-grained output;
        # otherwise it is scaled by the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optionally) classification/segmentation labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Spatial dims are downsampled by the configured output stride.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Logit shape must hold both without and with labels.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels, pixel_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    # NOTE(review): identifiers in this class are machine-mangled. The bases
    # are presumably ModelTesterMixin and PipelineTesterMixin (imported
    # above) -- TODO confirm. Every `lowerCamelCase_` class attribute below
    # rebinds the SAME name, so only the last assignment survives; upstream
    # these are distinct attributes (all_model_classes, the pipeline mapping,
    # and several boolean feature flags). Likewise all methods are named
    # `__UpperCAmelCase` and shadow one another; upstream they are setUp,
    # test_config and the individual test_* methods, and their local
    # assignments (`lowercase_ = ...`) lost their real targets.

    # All MobileNetV2 model classes exercised by the common tests
    # (read elsewhere via self.all_model_classes).
    lowerCamelCase_ =(
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    lowerCamelCase_ =(
        {
            'feature-extraction': MobileNetVaModel,
            'image-classification': MobileNetVaForImageClassification,
            'image-segmentation': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags disabled for this architecture (names mangled away).
    lowerCamelCase_ =False
    lowerCamelCase_ =False
    lowerCamelCase_ =False
    lowerCamelCase_ =False

    def __UpperCAmelCase ( self : int) -> str:
        # setUp: build the shared model tester and config tester.
        lowercase_ = MobileNetVaModelTester(self)
        lowercase_ = MobileNetVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase)

    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        # test_config: run the shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def __UpperCAmelCase ( self : Tuple) -> Optional[Any]:
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        pass

    def __UpperCAmelCase ( self : List[str]) -> str:
        # test_forward_signature: the first positional forward arg must be
        # "pixel_values" for every model class.
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ = model_class(__lowerCAmelCase)
            lowercase_ = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ = [*signature.parameters.keys()]
            lowercase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase)

    def __UpperCAmelCase ( self : List[str]) -> str:
        # test_model: shape checks on the base model.
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase)

    def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
        # test_hidden_states_output: the model must report 16 hidden states,
        # both when requested per-call and when enabled via the config.
        def check_hidden_states_output(__lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int):
            lowercase_ = model_class(__lowerCAmelCase)
            model.to(__lowerCAmelCase)
            model.eval()
            with torch.no_grad():
                lowercase_ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase))
            lowercase_ = outputs.hidden_states
            # Expected number of hidden states for MobileNetV2.
            lowercase_ = 16
            self.assertEqual(len(__lowerCAmelCase) , __lowerCAmelCase)

        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase_ = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)

    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        # test_for_image_classification
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase)

    def __UpperCAmelCase ( self : List[str]) -> Any:
        # test_for_semantic_segmentation
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase)

    @slow
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        # test_model_from_pretrained: smoke-load the first hub checkpoint.
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ = MobileNetVaModel.from_pretrained(__lowerCAmelCase)
            self.assertIsNotNone(__lowerCAmelCase)
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below.

    The original defined this as `__a` while the integration tests call
    `prepare_img()`; the name is restored from those call sites.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    # NOTE(review): class and method names are machine-mangled; upstream this
    # is the slow integration-test class. The `lowercase_ = ...` assignments
    # lost their real targets (model, image_processor, inputs, outputs,
    # expected shapes/values), so the later references (model, inputs,
    # outputs, logits, ...) name variables that are never bound here.
    @cached_property
    def __UpperCAmelCase ( self : List[Any]) -> List[str]:
        # default_image_processor: only constructed when vision deps exist.
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        # Image-classification head: check logits shape (1, 1001) and the
        # first three logit values against recorded references.
        lowercase_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(__lowerCAmelCase)
        lowercase_ = self.default_image_processor
        lowercase_ = prepare_img()
        lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="pt").to(__lowerCAmelCase)

        # forward pass
        with torch.no_grad():
            lowercase_ = model(**__lowerCAmelCase)

        # verify the logits
        lowercase_ = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase)
        lowercase_ = torch.tensor([0.2445, -1.1993, 0.1905]).to(__lowerCAmelCase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4))

    @slow
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
        # Semantic-segmentation head (DeepLabV3): check logits shape
        # (1, 21, 65, 65) and a 3x3x3 corner against recorded references.
        lowercase_ = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        lowercase_ = model.to(__lowerCAmelCase)
        lowercase_ = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        lowercase_ = prepare_img()
        lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="pt").to(__lowerCAmelCase)

        # forward pass
        with torch.no_grad():
            lowercase_ = model(**__lowerCAmelCase)
        lowercase_ = outputs.logits

        # verify the logits
        lowercase_ = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape , __lowerCAmelCase)
        lowercase_ = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=__lowerCAmelCase , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1e-4))
def solution(pence: int = 200) -> int:
    """Project Euler 31: count the ways to make `pence` pence from UK coins.

    Classic bottom-up coin-change counting. The original's inner loop started
    at the function parameter instead of the current coin value, which made
    every pass a no-op except at the final index; the loop bound is restored.

    >>> solution(200)
    73682
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# NOTE(review): both module constants were mangled to `A_`, so the second
# assignment shadowed the logger; names reconstructed per library convention.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMTaConfig(PretrainedConfig):
    r"""
    Configuration for UMT5 models (`model_type="umt5"`).

    NOTE(review): the class/attribute names were machine-mangled and `self.`
    was dropped from every attribute assignment (the properties at the bottom
    read `self.d_model` / `self.num_heads` / `self.num_layers` /
    `self.feed_forward_proj`, which were never set). Names are reconstructed
    from those reads, the keyword arguments forwarded to `super().__init__`,
    and PretrainedConfig conventions — TODO confirm `dense_act_fn` /
    `is_gated_act` against upstream.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Mirror the encoder depth when no decoder depth is given (symmetry).
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" style strings split into (gating flag, activation name).
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""

    @property
    def hidden_size(self):
        # Standard alias expected elsewhere in the library.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMTaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """
    ONNX export configuration for UMT5.

    NOTE(review): the class and property names were machine-mangled; they are
    reconstructed from the "Copied from ... T5OnnxConfig.inputs /
    .default_onnx_opset" comments and the `return common_inputs` statement,
    whose variable was never bound in the mangled original.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            # With cached key/values the encoder mask spans past + current tokens
            # and the decoder feeds one token at a time.
            common_inputs["attention_mask"][1] = """past_encoder_sequence + sequence"""
            common_inputs["decoder_input_ids"] = {0: """batch"""}
            common_inputs["decoder_attention_mask"] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            common_inputs["decoder_input_ids"] = {0: """batch""", 1: """decoder_sequence"""}
            common_inputs["decoder_attention_mask"] = {0: """batch""", 1: """decoder_sequence"""}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        # Looser tolerance for floating-point drift during export validation.
        return 5e-4
"""simple docstring"""
A_ : Any = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
# --- snippet boundary (non-code dataset residue removed) ---
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """PyTorch-specific benchmark arguments.

    NOTE(review): identifiers were machine-mangled. Field and method names
    are reconstructed from their uses in this block (`self.deprecated_args`,
    the `kwargs.pop(...)` keys, `self._setup_devices`, `self.n_gpu`); the
    class name and field defaults follow upstream convention — TODO confirm.
    """

    # Legacy negative flags still accepted; each `no_xxx` kwarg is translated
    # to `xxx=False` in __init__ with a deprecation warning.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` kwargs to their positive forms and pop
        the PyTorch-only options before delegating to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fpaa_opt_level = kwargs.pop("fp16_opt_level", self.fpaa_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fpaa_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve the benchmark device and GPU count exactly once."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
# --- snippet boundary (non-code dataset residue removed) ---
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): the mangled original rebound this variable to bare lists in
# the backend branches and never defined `_import_structure`, which the
# _LazyModule call at the bottom reads; the keyed assignments are restored.
_import_structure = {
    '''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
    '''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}

# Register each backend-specific modeling module only when that backend is
# importable; otherwise it is simply left out of the lazy structure.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vision_text_dual_encoder'''] = ['''VisionTextDualEncoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_vision_text_dual_encoder'''] = ['''FlaxVisionTextDualEncoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vision_text_dual_encoder'''] = ['''TFVisionTextDualEncoderModel''']


if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors the lazy structure.
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
# --- snippet boundary (non-code dataset residue removed) ---
"""Convert between Roman numerals and integers.

The mangled original gave both functions the same name (the second shadowed
the first), named the symbol table `lowercase__` while iterating it as
`ROMAN`, and collapsed the `factor, number = divmod(...)` unpacking into a
single name; all three defects are repaired here.
"""

ROMAN = [
    (1000, 'M'),
    (900, 'CM'),
    (500, 'D'),
    (400, 'CD'),
    (100, 'C'),
    (90, 'XC'),
    (50, 'L'),
    (40, 'XL'),
    (10, 'X'),
    (9, 'IX'),
    (5, 'V'),
    (4, 'IV'),
    (1, 'I'),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral string to its integer value.

    >>> roman_to_int("XC")
    90
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one is subtractive (e.g. IV == 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert a positive integer to a Roman numeral string.

    >>> int_to_roman(90)
    'XC'
    """
    result = []
    for arabic, roman in ROMAN:
        # How many copies of this symbol fit, and what remains afterwards.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Kahn's algorithm: topological sort of a DAG given as an adjacency list."""


def topological_sort(graph):
    """Print a topological ordering of `graph`, or "Cycle exists" if cyclic.

    `graph` maps vertex -> list of successor vertices; vertices must be
    0..len(graph)-1. Also returns the ordering (None when a cycle was
    detected) so callers can use the result programmatically — a
    backward-compatible addition, the original returned nothing.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    processed = 0

    # Count incoming edges of every vertex.
    for successors in graph.values():
        for node in successors:
            indegree[node] += 1

    # Seed the queue with every source vertex (no incoming edges).
    for node in range(len(graph)):
        if indegree[node] == 0:
            queue.append(node)

    while queue:
        vertex = queue.pop(0)
        processed += 1
        topo.append(vertex)
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                queue.append(successor)

    # If not every vertex was emitted, some cycle blocked the ordering.
    if processed != len(graph):
        print("""Cycle exists""")
        return None
    print(topo)
    return topo


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Print the first-order (single character) and second-order (character
    pair) Shannon entropy of `text`, plus the difference between them.
    Only space and the lowercase ASCII letters contribute.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # Total number of single-character observations.
    all_sum = sum(single_char_strings.values())

    # First-order entropy: -sum(p * log2(p)) over single characters.
    my_fir_sum = 0.0
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # Total number of character-pair observations.
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    # Entropy over every two-character sequence of the alphabet. (The
    # mangled original reused one loop variable for both positions, so it
    # only ever looked at doubled characters.)
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                count = two_char_strings[sequence]
                prob = int(count) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Count single characters and adjacent two-character sequences in `text`.
    A leading space is prepended for the first pair, and the final character
    is counted separately so every character is counted exactly once.
    Empty input yields two empty counters (the original raised IndexError).
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore

    if not text:
        return single_char_strings, two_char_strings

    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests; a sample text for calculate_prob is kept
    commented out below for manual experiments."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. ..."
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_(SequenceFeatureExtractor):
    r"""
    Feature extractor that pads/chunks raw audio into `input_values` plus an
    optional `padding_mask`.

    NOTE(review): the original was machine-mangled to the point of being
    invalid Python (every parameter named `lowercase`, i.e. duplicate
    argument names; both properties shared one name while `__call__` reads
    `self.chunk_length` and `self.chunk_stride`). Parameter, property and
    local names below are reconstructed from their uses in the bodies; the
    class name itself is kept as found. Upstream this is the EnCodec
    feature extractor — TODO confirm.
    """

    # Names of the arrays this extractor produces.
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunks in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            # Stride shrinks as overlap grows, but never below one sample.
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding=None,
        truncation=False,
        max_length=None,
        return_tensors=None,
        sampling_rate=None,
    ) -> BatchFeature:
        """Pad (and optionally chunk/truncate) raw audio into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')

        if padding and truncation:
            raise ValueError('''Both padding and truncation were set. Make sure you only set one.''')
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            # The mangling collapsed both dtypes to one token; presumably this
            # downcasts float64 input to float32 — TODO confirm upstream.
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}')
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels')
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels')

        padded_inputs = None
        input_values = BatchFeature({'''input_values''': raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate everything to a whole number of chunks of the
                # shortest example.
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad everything up to a whole number of chunks of the
                # longest example.
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = '''max_length'''
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop('''attention_mask''')

        input_values = []
        for example in padded_inputs.pop('''input_values'''):
            if self.feature_size == 1:
                # Mono audio gains an explicit channel axis.
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( HashTable ):
    """Separate-chaining hash table: each occupied slot holds a deque of values.

    Extends the project ``HashTable`` base class. ``_set_value`` prepends into a
    per-slot deque, ``balanced_factor`` reports average spare chain capacity, and
    ``_collision_resolution`` keeps writing to the same slot until its chain is
    full and no slot is empty, then defers to the parent's probing.

    Fixes vs. the previous revision: the methods all shared one mangled name
    (each ``def`` shadowed the previous), several signatures repeated the same
    parameter name (a SyntaxError), and the base class name was undefined.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* into the chain stored at slot *key*."""
        # Lazily create the chain for this slot on first use.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # Keep the key registry in sync with the stored chain.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Return the average remaining chain capacity across the table."""
        # NOTE(review): assumes every slot already holds a deque (``len(slot)``);
        # a still-``None`` slot would raise TypeError — confirm against the base
        # class's initialisation of ``self.values``.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Resolve a collision at *key*; chain locally while space remains."""
        # Stay in this slot unless its chain is full AND no slot is empty;
        # only then fall back to the parent's probing strategy.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 17 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case ( DiffusionPipeline ):
    """Unconditional latent-diffusion generation pipeline (VQ-VAE + UNet + scheduler).

    Fixes vs. the previous revision: ``__init__`` and ``__call__`` declared the
    same parameter name repeatedly (a SyntaxError), and the base class name was
    undefined; parameter names are restored to the identifiers the body already
    references (``batch_size``, ``eta``, ``output_type``, ``return_dict``).
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        # register_modules makes the components available as self.vqvae etc.
        # and records them for save/load.
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size = 1,
        generator = None,
        eta = 0.0,
        num_inference_steps = 50,
        output_type = "pil",
        return_dict = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample ``batch_size`` images by iterative denoising in latent space.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict`` is False).
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 294 | 0 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e**-x) of *vector*.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise SiLU ("swish") activation x * sigmoid(x).

    Fixes vs. the previous revision: both functions carried the same mangled
    name (the second shadowed the first) while this body called the undefined
    name ``sigmoid`` — the real names are restored.
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
A_ : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCAmelCase ( Pipeline ):
    """Video-classification pipeline: decodes a video (path or URL) with decord,
    samples frames, and classifies them with a video model.

    Fixes vs. the previous revision: the four pipeline hooks all shared one
    mangled method name (each shadowed the previous, breaking the ``Pipeline``
    base-class protocol, which dispatches by name), ``postprocess`` referenced
    undefined ``scores``/``ids``, ``np.linspace`` received the wrong arguments,
    and the decorator argument / base class were undefined names. Hook names
    are restored to the identifiers the base class and the bodies require.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''decord''')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        """Decode *video* (local path or http(s) URL) and sample frames."""
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('''http://''') or video.startswith('''https://'''):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # Evenly spaced frame indices covering num_frames * rate source frames.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the top-k {"score", "label"} dicts for the clip."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 606 | 1 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read *file_path* as bytes and return them as one long '0'/'1' string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''')
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Grow the LZ lexicon after emitting *last_match_id* for *curr_string*."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    # When the code count crosses a power of two, every code gains one bit.
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '''0''' + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the bit-string *data_bits* with the Lempel-Ziv scheme."""
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = '''''', ''''''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''''''

    # Flush a trailing partial match by padding it out to a known code.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (self-delimiting binary) to *compressed*."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    # (length_length - 1) leading zeros make the length field self-delimiting.
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit-string *to_write* to *file_path* as bytes, with 1000... padding."""
    byte_length = 8
    try:
        with open(file_path, '''wb''') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            # Pad the last byte with "1" then zeros so the end is recoverable.
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('''10000000''')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='''big'''))
    except OSError:
        print('''File not accessible''')
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Compress *source_path* into *destination_path*."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 647 |
import re
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> str:
    """Return the complementary DNA strand (A<->T, C<->G) of *lowerCAmelCase__*.

    Raises ValueError if the strand contains any character other than A/T/C/G.

    Fixes vs. the previous revision: the body referenced the undefined name
    ``dna`` — the parameter had been renamed without updating its uses.
    """
    # Every character must be one of A/T/C/G for the strand to be valid.
    if len(re.findall('[ATCG]', lowerCAmelCase__)) != len(lowerCAmelCase__):
        raise ValueError('Invalid Strand')

    return lowerCAmelCase__.translate(lowerCAmelCase__.maketrans('ATCG', 'TAGC'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations
def UpperCamelCase ( _A : int )-> bool:
"""simple docstring"""
A__ = str(_A )
return len(_A ) == 9 and set(_A ) == set("123456789" )
def UpperCamelCase ( )-> int | None:
"""simple docstring"""
for base_num in range(9999 , 4999 , -1 ):
A__ = 100002 * base_num
if is_9_pandigital(_A ):
return candidate
for base_num in range(333 , 99 , -1 ):
A__ = 1002003 * base_num
if is_9_pandigital(_A ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
| 232 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase_ : int = pytest.mark.integration
@require_faiss
class UpperCamelCase ( _UpperCAmelCase ):
    """Integration tests for attaching FAISS / Elasticsearch indexes to a Dataset.

    NOTE(review): every method here is named ``__A``, so each ``def`` shadows
    the previous one and only the last body survives on the class; several
    bodies also reference names (``dset``, ``examples``, ``i``, ``x``) that are
    never bound, and one lambda repeats a parameter name (a SyntaxError). The
    names look machine-mangled — confirm against the upstream test module
    before relying on this file.
    """

    def __A ( self ):
        # Builds a 30-row dummy dataset with a single "filename" column.
        A__ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(UpperCAmelCase__ ) for x in np.arange(30 ).tolist()]} )
        return dset

    def __A ( self ):
        # Adds an inner-product FAISS index over per-row vectors and checks that
        # an all-ones query retrieves the last (largest-magnitude) row.
        import faiss
        A__ = self._create_dummy_dataset()
        A__ = dset.map(
            lambda UpperCAmelCase__ , UpperCAmelCase__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ )
        A__ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        A__ , A__ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )
        dset.drop_index("vecs" )

    def __A ( self ):
        # Same retrieval check, but the index is built from external arrays.
        import faiss
        A__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        A__ , A__ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def __A ( self ):
        # Round-trips a FAISS index through save/load on disk, then re-queries.
        import faiss
        A__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCAmelCase__ ) as tmp_file:
            dset.save_faiss_index("vecs" , tmp_file.name )
            dset.load_faiss_index("vecs2" , tmp_file.name )
        os.unlink(tmp_file.name )
        A__ , A__ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def __A ( self ):
        # Querying a dropped index must raise (MissingIndex upstream).
        A__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
        dset.drop_index("vecs" )
        self.assertRaises(UpperCAmelCase__ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )

    def __A ( self ):
        # Elasticsearch retrieval with the client fully mocked out.
        from elasticsearch import Elasticsearch
        A__ = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            A__ = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            A__ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            A__ = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=UpperCAmelCase__ )
            A__ , A__ = dset.get_nearest_examples("filename" , "my_name-train_29" )
            self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class UpperCamelCase ( _UpperCAmelCase ):
    """Unit tests for the standalone ``FaissIndex`` wrapper.

    NOTE(review): as in the class above, all methods are named ``__A`` (each
    shadows the previous) and many bodies reference unbound names
    (``index``, ``query``, ``scores``, ``total_scores``) — mangled identifiers
    to be confirmed against the upstream test module.
    """

    def __A ( self ):
        # Flat inner-product index: add vectors, single query, batched queries.
        import faiss
        A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        A__ = np.zeros(5 , dtype=np.floataa )
        A__ = 1
        A__ , A__ = index.search(UpperCAmelCase__ )
        self.assertRaises(UpperCAmelCase__ , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        A__ = np.eye(5 , dtype=np.floataa )[::-1]
        A__ , A__ = index.search_batch(UpperCAmelCase__ )
        self.assertRaises(UpperCAmelCase__ , index.search_batch , queries[0] )
        A__ = [scores[0] for scores in total_scores]
        A__ = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase__ )

    def __A ( self ):
        # string_factory must select the right FAISS index type, and must be
        # mutually exclusive with custom_index.
        import faiss
        A__ = FaissIndex(string_factory="Flat" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        A__ = FaissIndex(string_factory="LSH" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(UpperCAmelCase__ ):
            A__ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )

    def __A ( self ):
        # A pre-built FAISS index can be wrapped directly.
        import faiss
        A__ = faiss.IndexFlat(5 )
        A__ = FaissIndex(custom_index=UpperCAmelCase__ )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )

    def __A ( self ):
        # Round-trips the index through save/load and re-queries it.
        import faiss
        A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCAmelCase__ ) as tmp_file:
            index.save(tmp_file.name )
            A__ = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        A__ = np.zeros(5 , dtype=np.floataa )
        A__ = 1
        A__ , A__ = index.search(UpperCAmelCase__ )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def UpperCamelCase ( _A : Union[str, Any] )-> List[Any]:
    """Save/load a FaissIndex through an fsspec filesystem and re-query it.

    NOTE(review): the parameter ``_A`` is presumably the ``mockfs`` pytest
    fixture — the body references ``mockfs`` and an ``index`` name that are
    never bound here (mangled identifiers); confirm against the upstream
    test module.
    """
    import faiss

    A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )

    # Save to / load from the mock "mock://" filesystem rather than local disk.
    A__ = "index.faiss"
    A__ = f"""mock://{index_name}"""
    index.save(_A , storage_options=mockfs.storage_options )
    A__ = FaissIndex.load(_A , storage_options=mockfs.storage_options )

    A__ = np.zeros(5 , dtype=np.floataa )
    A__ = 1
    A__ , A__ = index.search(_A )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class UpperCamelCase ( _UpperCAmelCase ):
    """Unit test for ``ElasticSearchIndex`` with a fully mocked ES client.

    NOTE(review): the single method is named ``__A`` and the body references
    unbound names (``index``, ``query``, ``scores``, ``total_scores``) —
    mangled identifiers to be confirmed against the upstream test module.
    """

    def __A ( self ):
        from elasticsearch import Elasticsearch

        # Patch out network calls: search results, index creation, bulk upload.
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            A__ = Elasticsearch()
            A__ = {"acknowledged": True}
            A__ = ElasticSearchIndex(es_client=UpperCAmelCase__ )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["foo", "bar", "foobar"] )
            # single query
            A__ = "foo"
            A__ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            A__ , A__ = index.search(UpperCAmelCase__ )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            A__ = "foo"
            A__ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            A__ , A__ = index.search(UpperCAmelCase__ , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            A__ = ["foo", "bar", "foobar"]
            A__ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            A__ , A__ = index.search_batch(UpperCAmelCase__ )
            A__ = [scores[0] for scores in total_scores]
            A__ = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
            self.assertListEqual([1, 1, 1] , UpperCAmelCase__ )
            # batched queries with timeout
            A__ = ["foo", "bar", "foobar"]
            A__ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            A__ , A__ = index.search_batch(UpperCAmelCase__ , request_timeout=30 )
            A__ = [scores[0] for scores in total_scores]
            A__ = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
            self.assertListEqual([1, 1, 1] , UpperCAmelCase__ )
| 232 | 1 |
import numpy as np
__SCREAMING_SNAKE_CASE =[
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class __magic_name__ :
'''simple docstring'''
def __init__( self: List[str] ):
SCREAMING_SNAKE_CASE_ = np.array(UpperCamelCase__ )
def _A ( self: int , _lowerCamelCase: int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.where(letter == self.SQUARE )
SCREAMING_SNAKE_CASE_ = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _A ( self: Optional[int] , _lowerCamelCase: Tuple , _lowerCamelCase: Any ):
SCREAMING_SNAKE_CASE_ = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _A ( self: Optional[Any] , _lowerCamelCase: Optional[int] ):
SCREAMING_SNAKE_CASE_ = message.lower()
SCREAMING_SNAKE_CASE_ = message.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE_ = message.replace('''j''' , '''i''' )
SCREAMING_SNAKE_CASE_ = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE_ = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE_ = numbers[0]
SCREAMING_SNAKE_CASE_ = numbers[1]
SCREAMING_SNAKE_CASE_ = first_step.reshape(2 * len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE_ = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE_ = int(second_step[numbers_index * 2] )
SCREAMING_SNAKE_CASE_ = int(second_step[(numbers_index * 2) + 1] )
SCREAMING_SNAKE_CASE_ = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = encoded_message + letter
return encoded_message
def _A ( self: Any , _lowerCamelCase: Optional[int] ):
SCREAMING_SNAKE_CASE_ = message.lower()
message.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE_ = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE_ = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE_ = numbers[0]
SCREAMING_SNAKE_CASE_ = numbers[1]
SCREAMING_SNAKE_CASE_ = first_step.reshape((2, len(UpperCamelCase__ )) )
SCREAMING_SNAKE_CASE_ = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE_ = int(second_step[0, numbers_index] )
SCREAMING_SNAKE_CASE_ = int(second_step[1, numbers_index] )
SCREAMING_SNAKE_CASE_ = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = decoded_message + letter
return decoded_message
| 234 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( AbstractFileSystem ):
    """Legacy read-only fsspec filesystem over the files of a Hugging Face Hub repo.

    Fixes vs. the previous revision: the two class attributes shared one name
    (so the empty-string root marker was lost and fsspec's required
    ``protocol`` attribute was shadowed), the four methods shared one mangled
    name while ``info``/``ls`` called ``self._get_dirs``, and the base class
    and the ``DatasetInfo`` check referenced undefined names.
    """

    root_marker = """"""
    protocol = """hf-legacy"""  # "hf://" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        # NOTE(review): upstream passes ``self`` through to the parent here;
        # preserved as-is.
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        # Lazily built map of path -> file/directory metadata.
        self.dir_cache = None

    def _get_dirs(self):
        """Populate ``dir_cache`` from the repo's sibling files (idempotent)."""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory of the file as well.
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    } )

    def _open(self, path, mode="rb", **kwargs):
        """Open a file from the repo via its resolved hub URL."""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """Return cached metadata for *path*; raise FileNotFoundError if absent."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of *path* (metadata dicts when *detail*)."""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 660 | 0 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    """Build a SwinaSRConfig matching the variant encoded in *checkpoint_url*."""
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''

    return config


def rename_key(name, config):
    """Map one original-checkpoint parameter name to the HF model's name."""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')

    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name

    return name


def convert_state_dict(orig_state_dict, config):
    """Rename/split all checkpoint tensors; fused qkv weights are split in thirds."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict


def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download an original Swin2SR checkpoint, convert it, verify outputs on a
    sample image, and optionally save / push the HF model and processor."""
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        # These buffers are recomputed by the HF model and may safely be dropped.
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"""Unexpected key {key} in state_dict""")

    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ] )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1E-3)
    print('Looks ok!')

    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving image processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(F"""caidas/{model_name}""")
        processor.push_to_hub(F"""caidas/{model_name}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
        type=str,
        help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 715 |
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a non-negative decimal integer to its representation in ``base``.

    Digits above 9 are rendered as the letters A-Z, so 2 <= base <= 36.

    :param num: non-negative integer to convert
    :param base: target base, between 2 and 36 inclusive
    :raises TypeError: if ``num`` or ``base`` is not an int
    :raises ValueError: if ``num`` is negative or ``base`` is out of range

    >>> decimal_to_any(0, 2)
    '0'
    >>> decimal_to_any(58, 16)
    '3A'
    """
    # Reject non-int inputs explicitly so the errors mirror int()'s own wording.
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Digit values 10..35 map to "A".."Z" (chr(65) == "A" == chr(55 + 10)).
            actual_value = chr(55 + mod)
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        # Digits were collected least-significant first, so reverse on return.
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Round-trip sanity check: int(value, base) must invert decimal_to_any
    # for every supported base and a range of small numbers.
    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
| 45 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a, precision: float = 10**-10) -> float:
    """Find a root of ``func`` via the Newton-Raphson method.

    :param func: an expression in the variable ``x`` given as a string,
        e.g. ``"sin(x)"`` — evaluated with ``eval`` (the local ``x`` must keep
        that exact name for the evaluation to see it)
    :param a: initial guess for the root
    :param precision: stop once ``abs(func(x))`` drops below this threshold
    :return: the approximated root as a float

    SECURITY NOTE: ``eval`` executes arbitrary code; never pass untrusted
    input as ``func``.
    """
    x = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n); sympy.diff supplies f'.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
    # Find root of polynomial
    print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find value of e (root of ln(x) = 1)
    print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
    # Exponential Roots
    print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 482 |
"""simple docstring"""
def _a ( UpperCAmelCase__ ) -> int:
__SCREAMING_SNAKE_CASE = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
__SCREAMING_SNAKE_CASE = hex_num[0] == '''-'''
if is_negative:
__SCREAMING_SNAKE_CASE = hex_num[1:]
try:
__SCREAMING_SNAKE_CASE = int(UpperCAmelCase__ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
__SCREAMING_SNAKE_CASE = ''''''
while int_num > 0:
__SCREAMING_SNAKE_CASE = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 482 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Which of the three migration steps to perform on the checkpoint repo.
do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )

    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")

    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }

    # Old state-dict key prefix -> new module name.
    key_parameters_to_change = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }

    # Models saved at the repo root keep subfolder == ""; otherwise the UNet
    # lives in an "unet" subfolder of the pipeline repo.
    subfolder = """""" if has_file(args.repo_path, """config.json""") else """unet"""

    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, """config.json"""):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        # Move values from their old config keys to the new ones.
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["""down_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        config["""up_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Drop the legacy downsample "op" parameters entirely.
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                # Only the first path component is renamed; the rest is kept.
                if not has_changed and param_key.split(""".""")[0] == key:
                    new_state_dict[""".""".join([new_key] + param_key.split(""".""")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 187 |
'''simple docstring'''
from __future__ import annotations
class lowercase:
    """N-th order IIR filter in direct form I:

    y[n] = (1 / a_0) * (sum_i b_i * x[n-i] - sum_{i>=1} a_i * y[n-i])
    """

    def __init__(self, order: int) -> None:
        """Create an identity filter of the given order (a = b = [1, 0, ..., 0])."""
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the feedback (a) and feedforward (b) coefficients.

        ``a_coeffs`` may omit the leading a_0, in which case 1.0 is assumed;
        both lists must then contain exactly ``order + 1`` values.

        :raises ValueError: if either list has the wrong length
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs)}'''
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs)}'''
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one input sample through the filter and return the output."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories one step and store the newest values up front.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
| 187 | 1 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowercase(unittest.TestCase):
    """Unit tests for the generation stopping-criteria utilities."""

    def _get_tensors(self, length):
        """Build dummy (input_ids, scores) tensors of the requested length."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the start timestamp makes the time budget already spent.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # A mismatched max_length should warn rather than fail.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 532 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): the next assignment reuses the same (mangled) name and
# shadows the logger — presumably these were two distinct names originally.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

# Map of canonical FNet checkpoint names to their hosted config URLs.
SCREAMING_SNAKE_CASE__ = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase(_UpperCAmelCase):
    """Configuration for the FNet model.

    FNet replaces self-attention with Fourier transforms, so there is no
    attention-head hyper-parameter here.
    """

    # Identifier used by the transformers config machinery.
    model_type = 'fnet'

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # TPU-specific Fourier-transform optimizations (see the FNet paper/impl).
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 532 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE(vectors, noofclusters):
    """K-means clustering implemented with the TensorFlow 1.x graph API.

    :param vectors: sequence of equal-length numeric vectors to cluster
    :param noofclusters: number of clusters k (must be < len(vectors))
    :return: (centroids, assignments) evaluated to concrete values
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("""float64""", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("""int32""")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("""float""", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("""float""", [dim])
        vb = tf.placeholder("""float""", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("""float""", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): the next assignment reuses the same (mangled) name and
# shadows the logger — presumably these were two distinct names originally.
lowercase_ : Any = logging.get_logger(__name__)

# Map of canonical ViT-MSN checkpoint names to their hosted config URLs.
lowercase_ : str = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase(_UpperCAmelCase):
    """Configuration for the ViT-MSN model (Vision Transformer trained with
    Masked Siamese Networks)."""

    # Identifier used by the transformers config machinery.
    model_type = '''vit_msn'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Whether the query/key/value projections carry bias terms.
        self.qkv_bias = qkv_bias
| 653 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# SHA of the commit where the current branch forked from main.
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')

# Files changed since the fork point; --diff-filter=d excludes deletions,
# since deleted files cannot be linted.
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
)

# Top-level directories of interest are passed as CLI arguments.
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: this output is fed directly into Makefile commands.
print(''' '''.join(relevant_modified_files), end='''''')
| 40 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator: make ``func`` return its wall-clock run time in seconds.

    The wrapped function's own return value is discarded; its ``__name__``
    is preserved on the wrapper.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    """Create ``num_examples`` random rows matching a datasets feature spec.

    :param features: mapping of column name -> datasets feature type
    :param num_examples: number of example rows to generate
    :param seq_shapes: mapping of column name -> shape, required for
        ``datasets.Sequence`` columns
    :return: list of ``(index, example_dict)`` pairs
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence wrappers to reach the element type.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write a random Arrow dataset matching ``features`` to ``dataset_path``
    and return it loaded as a ``datasets.Dataset``.

    :raises ValueError: if the writer reports a different number of rows
        than requested
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 95 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase(__a, unittest.TestCase):
    """Fast tests for the Kandinsky V22 prior pipeline using tiny dummy
    components (attribute names follow the PipelineTesterMixin contract)."""

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators, so fall back to the
        # global seeded generator there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 711 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map of canonical ALBERT checkpoint names to their hosted config URLs.
lowercase = {
    '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
    '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
    '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
    '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
    '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
    '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
    '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
    '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(__a):
    """ALBERT model configuration (defaults correspond to albert-xxlarge-v2).

    Renamed from the placeholder class name, which collided with the ONNX
    config class defined right below it.
    """

    # Identifier used by the transformers config machinery.
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # ALBERT shares parameters across groups of layers.
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(__a):
    """ONNX export configuration for ALBERT: declares the dynamic axes of the
    model inputs (the base-class contract requires the property to be named
    ``inputs``)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 41 | 0 |
# Adjacency list of the DAG and the list of its vertices; topological_sort
# below reads these module-level names.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Depth-first topological sort of the module-level ``edges`` graph.

    :param start: vertex to start from
    :param visited: list of already-visited vertices (mutated in place)
    :param sort: accumulator list of emitted vertices
    :return: ``sort`` with vertices appended in post-order
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    # The result was assigned to a placeholder name but printed as ``sort``.
    sort = topological_sort("a", [], [])
    print(sort)
| 298 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# List of known pretrained UperNet checkpoints.
# NOTE(review): the second assignment below reuses the same (mangled) name and
# shadows this list — presumably these were two distinct names originally.
_snake_case : Optional[Any] = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_snake_case : Dict = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """Conv2d + BatchNorm2d + ReLU block used throughout the UperNet head.

    Renamed from the placeholder class name ``a`` to match the references in
    the sibling modules below; ``forward`` restored so nn.Module dispatch works.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """Adaptive average pooling to ``pool_scale`` followed by a 1x1 conv
    projection (one branch of the Pyramid Pooling Module)."""

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register the layers so parameters are tracked by nn.Module.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSP): pools the input at several scales,
    projects each branch with a 1x1 conv and upsamples back to the input
    resolution."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            # Register each branch so its parameters are tracked.
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            # Upsample every pooled branch back to the spatial size of x.
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class a (nn.Module ):
    """UperNet decode head: a PSP module over the deepest backbone feature map
    plus an FPN over the shallower maps, fused and projected to
    ``config.num_labels`` per-pixel logits.

    NOTE(review): obfuscation duplicated parameter names, collapsed locals
    and attributes to ``__snake_case``, and renamed ``nn.Conv2d`` to
    ``nn.Convad`` — confirm against the upstream transformers UperNet head.
    """

    def __init__( self : int , lowerCamelCase : str , lowerCamelCase : str ) -> List[str]:
        super().__init__()
        __snake_case : Dict = config
        __snake_case : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
        __snake_case : Tuple = in_channels
        __snake_case : str = config.hidden_size
        __snake_case : List[str] = False
        # Final 1x1 projection from fused channels to class logits.
        __snake_case : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        __snake_case : Tuple = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        __snake_case : List[str] = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        __snake_case : List[Any] = nn.ModuleList()
        __snake_case : Dict = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            __snake_case : Union[str, Any] = UperNetConvModule(lowerCamelCase , self.channels , kernel_size=1 )
            __snake_case : Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lowerCamelCase )
            self.fpn_convs.append(lowerCamelCase )
        __snake_case : int = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )

    def __snake_case ( self : List[str] ) -> Optional[Any]:
        # Recursively apply _init_weights to every submodule.
        self.apply(self._init_weights )

    def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> str:
        # Normal init for conv weights, zero for biases.
        if isinstance(lowerCamelCase , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def __snake_case ( self : List[Any] , lowerCamelCase : Tuple ) -> Optional[int]:
        # PSP forward: multi-scale pool the deepest map, concat, bottleneck.
        __snake_case : str = inputs[-1]
        __snake_case : int = [x]
        psp_outs.extend(self.psp_modules(lowerCamelCase ) )
        __snake_case : Tuple = torch.cat(lowerCamelCase , dim=1 )
        __snake_case : Union[str, Any] = self.bottleneck(lowerCamelCase )
        return output

    def __snake_case ( self : int , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # build laterals
        __snake_case : Any = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(lowerCamelCase ) )
        # build top-down path
        __snake_case : Dict = len(lowerCamelCase )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            __snake_case : Union[str, Any] = laterals[i - 1].shape[2:]
            # Upsample the deeper level and add it into the next shallower one.
            __snake_case : Optional[int] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=lowerCamelCase , mode="bilinear" , align_corners=self.align_corners )
        # build outputs
        __snake_case : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            # Resize every FPN level to the finest resolution before fusing.
            __snake_case : Tuple = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
        __snake_case : str = torch.cat(lowerCamelCase , dim=1 )
        __snake_case : Optional[Any] = self.fpn_bottleneck(lowerCamelCase )
        __snake_case : Tuple = self.classifier(lowerCamelCase )
        return output
class a (nn.Module ):
    """Auxiliary FCN head: a stack of 3x3 ConvModules over one backbone
    feature map (selected by ``in_index``), optionally concatenated with its
    input, then a 1x1 classifier producing ``config.num_labels`` logits.
    """

    def __init__( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int = 2 , lowerCamelCase : int = 3 , lowerCamelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
        # Parameters (upstream): config, in_index=2, kernel_size=3, dilation=1.
        super().__init__()
        __snake_case : List[Any] = config
        __snake_case : List[str] = config.auxiliary_in_channels
        __snake_case : List[Any] = config.auxiliary_channels
        __snake_case : Tuple = config.auxiliary_num_convs
        __snake_case : int = config.auxiliary_concat_input
        __snake_case : Optional[int] = in_index
        # Padding that keeps the spatial size constant for this dilation.
        __snake_case : Tuple = (kernel_size // 2) * dilation
        __snake_case : Optional[int] = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
        if self.num_convs == 0:
            __snake_case : Union[str, Any] = nn.Identity()
        else:
            __snake_case : Any = nn.Sequential(*lowerCamelCase )
        if self.concat_input:
            __snake_case : int = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase , padding=kernel_size // 2 )
        __snake_case : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )

    def __snake_case ( self : Dict ) -> Optional[Any]:
        # Recursively apply _init_weights to every submodule.
        self.apply(self._init_weights )

    def __snake_case ( self : Tuple , lowerCamelCase : Tuple ) -> Optional[int]:
        # Normal init for conv weights, zero for biases.
        if isinstance(lowerCamelCase , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def __snake_case ( self : Optional[int] , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        __snake_case : List[str] = encoder_hidden_states[self.in_index]
        __snake_case : Optional[Any] = self.convs(lowerCamelCase )
        if self.concat_input:
            __snake_case : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        __snake_case : Union[str, Any] = self.classifier(lowerCamelCase )
        return output
class a (_lowerCAmelCase ):
    """UperNet pre-trained base class: declares the config class, the main
    input name and gradient-checkpointing support, and delegates weight
    initialization to the model's submodules."""

    # Config class used by from_pretrained.
    __UpperCAmelCase : Optional[Any] = UperNetConfig
    # Name of the principal input tensor.
    __UpperCAmelCase : int = "pixel_values"
    # Whether gradient checkpointing is supported.
    __UpperCAmelCase : str = True

    def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] ) -> List[Any]:
        # Initialize weights of the three submodules of the full model.
        if isinstance(lowerCamelCase , lowerCamelCase ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def __snake_case ( self : Optional[Any] ) -> List[str]:
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=False ) -> Dict:
        # Toggle gradient checkpointing on matching backbone modules.
        if isinstance(lowerCamelCase , lowerCamelCase ):
            __snake_case : Union[str, Any] = value
_snake_case : Dict = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case : Tuple = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , _lowerCAmelCase , )
class a (_lowerCAmelCase ):
    """UperNet semantic-segmentation model: a vision backbone followed by the
    UperNet decode head (plus an optional auxiliary FCN head), producing
    per-pixel class logits at the input image resolution."""

    def __init__( self : Dict , lowerCamelCase : int ) -> Optional[int]:
        super().__init__(lowerCamelCase )
        __snake_case : Any = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        __snake_case : Union[str, Any] = UperNetHead(lowerCamelCase , in_channels=self.backbone.channels )
        __snake_case : Any = UperNetFCNHead(lowerCamelCase ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
    @replace_return_docstrings(output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC )
    def __snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        """Run the backbone, decode feature maps to logits at the input
        resolution, and optionally compute the main + auxiliary CE loss."""
        __snake_case : Any = return_dict if return_dict is not None else self.config.use_return_dict
        __snake_case : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __snake_case : str = output_attentions if output_attentions is not None else self.config.output_attentions
        __snake_case : Tuple = self.backbone.forward_with_filtered_kwargs(
            lowerCamelCase , output_hidden_states=lowerCamelCase , output_attentions=lowerCamelCase )
        __snake_case : List[Any] = outputs.feature_maps
        __snake_case : List[Any] = self.decode_head(lowerCamelCase )
        # Upsample logits back to the input image resolution.
        __snake_case : List[str] = nn.functional.interpolate(lowerCamelCase , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=lowerCamelCase )
        __snake_case : Optional[int] = None
        if self.auxiliary_head is not None:
            __snake_case : Dict = self.auxiliary_head(lowerCamelCase )
            __snake_case : Dict = nn.functional.interpolate(
                lowerCamelCase , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=lowerCamelCase )
        __snake_case : int = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one" )
            else:
                # compute weighted loss
                __snake_case : Any = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                __snake_case : Union[str, Any] = loss_fct(lowerCamelCase , lowerCamelCase )
                __snake_case : Optional[Any] = loss_fct(lowerCamelCase , lowerCamelCase )
                __snake_case : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            # Tuple output path: include hidden states only when requested.
            if output_hidden_states:
                __snake_case : Any = (logits,) + outputs[1:]
            else:
                __snake_case : str = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 81 | 0 |
def __UpperCAmelCase ( number : int , position : int ) -> int:
    """Return *number* with the bit at *position* set to 1.

    The original signature declared the same parameter name twice (a
    SyntaxError) while the body read ``number``/``position``; the
    parameters are renamed to match the body.
    """
    # OR with a mask that has only the target bit set.
    return number | (1 << position)
def __UpperCAmelCase ( number : int , position : int ) -> int:
    """Return *number* with the bit at *position* cleared to 0.

    Parameters renamed to match the body (the original declared duplicate
    parameter names, a SyntaxError).
    """
    # AND with the complement of the single-bit mask.
    return number & ~(1 << position)
def __UpperCAmelCase ( number : int , position : int ) -> int:
    """Return *number* with the bit at *position* flipped.

    Parameters renamed to match the body (the original declared duplicate
    parameter names, a SyntaxError).
    """
    # XOR with the single-bit mask toggles exactly that bit.
    return number ^ (1 << position)
def __UpperCAmelCase ( number : int , position : int ) -> bool:
    """Return True if the bit at *position* in *number* is set.

    Parameters renamed to match the body (the original declared duplicate
    parameter names, a SyntaxError); the return annotation is corrected
    from the garbled ``Tuple`` to ``bool``.
    """
    # Shift the target bit into the LSB and mask it out.
    return ((number >> position) & 1) == 1
def __UpperCAmelCase ( number : int , position : int ) -> int:
    """Return 1 if the bit at *position* in *number* is set, else 0.

    Parameters renamed to match the body (the original declared duplicate
    parameter names, a SyntaxError).
    """
    # Mask the bit and collapse the comparison to 0/1.
    return int((number & (1 << position)) != 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717 |
def __UpperCAmelCase ( max_base : int = 10 , max_power : int = 22 ) -> int:
    """Project Euler 63: count positive n-digit integers that are n-th powers.

    Checks bases ``1 .. max_base - 1`` against powers ``1 .. max_power - 1``.
    The defaults give the canonical answer 49.

    The original declared duplicate parameter names (a SyntaxError) and the
    body read undefined ``bases``/``powers``; both are fixed, and the main
    guard now calls this function instead of the nonexistent ``solution``.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    # A value base**power qualifies when its decimal length equals the exponent.
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power
    )


if __name__ == "__main__":
    print(f"{__UpperCAmelCase(10, 22) = }")
| 578 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCamelCase_ ( ):
    """Entry point: parse TensorFlow benchmark CLI args and run the benchmark.

    NOTE(review): obfuscation collapsed several distinct locals to ``a_``
    and reused ``UpperCAmelCase__`` as both a type and a value, so the body
    will not run as written (``parser``/``benchmark`` etc. are undefined);
    the control flow mirrors transformers' TensorFlow ``run_benchmark``
    script — confirm against upstream before relying on details.
    """
    a_ = HfArgumentParser(UpperCAmelCase__ )
    a_ = parser.parse_args_into_dataclasses()[0]
    a_ = TensorFlowBenchmark(args=UpperCAmelCase__ )
    # Re-parse to surface deprecated --no_* flags with a helpful message.
    try:
        a_ = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate the old "--no_<arg>" spelling into the new "--no-<arg>" hint.
        a_ = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        a_ = """ """.join(str(UpperCAmelCase__ ).split(""" """ )[:-1] )
        a_ = """"""
        # The tail of the ValueError message is the list of offending flags.
        a_ = eval(str(UpperCAmelCase__ ).split(""" """ )[-1] )
        a_ = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(UpperCAmelCase__ )
        if len(UpperCAmelCase__ ) > 0:
            a_ = full_error_msg + begin_error_msg + str(UpperCAmelCase__ )
        raise ValueError(UpperCAmelCase__ )
    benchmark.run()
if __name__ == "__main__":
main() | 483 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowercase_ :
    """Resize images so the shorter edge matches a target length sampled from
    ``short_edge_length``, capping the longer edge at ``max_size``
    (Detectron2-style augmentation).

    NOTE(review): obfuscation collapsed attribute/local names to ``a_`` and
    renamed ``np.uint8`` to ``np.uinta`` — confirm against the upstream
    FRCNN preprocessing utils; the body will not run as written.
    """

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize ):
        # Parameters (upstream): short_edge_length range, max_size cap.
        a_ = """bilinear"""
        a_ = max_size
        a_ = short_edge_length

    def __call__( self , _UpperCAmelCase ):
        """Resize each image in the list; returns the list of resized images."""
        a_ = []
        for img in imgs:
            a_ , a_ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            a_ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            # Scale factor that makes the shorter edge equal the sampled size.
            a_ = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase )
            if h < w:
                a_ , a_ = size, scale * w
            else:
                a_ , a_ = scale * h, size
            # Shrink further if the longer edge would exceed max_size.
            if max(_UpperCAmelCase , _UpperCAmelCase ) > self.max_size:
                a_ = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase )
                a_ = newh * scale
                a_ = neww * scale
            a_ = int(neww + 0.5 )
            a_ = int(newh + 0.5 )
            if img.dtype == np.uinta:
                # uint8 images are resized through PIL.
                a_ = Image.fromarray(_UpperCAmelCase )
                a_ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                a_ = np.asarray(_UpperCAmelCase )
            else:
                # Float tensors are resized with torch interpolate in NCHW layout.
                a_ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
                a_ = nn.functional.interpolate(
                    _UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase ).squeeze(0 )
            img_augs.append(_UpperCAmelCase )
        return img_augs
class lowercase_ :
    """Image preprocessing pipeline for the FRCNN visual backbone: resize the
    shortest edge, normalize with the configured pixel mean/std, and pad the
    batch to a common spatial size."""

    def __init__( self , _UpperCAmelCase ):
        # Parameter (upstream): the model config ``cfg``.
        a_ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        a_ = cfg.INPUT.FORMAT
        a_ = cfg.SIZE_DIVISIBILITY
        a_ = cfg.PAD_VALUE
        a_ = cfg.INPUT.MAX_SIZE_TEST
        a_ = cfg.MODEL.DEVICE
        # Per-channel mean/std reshaped (C, 1, 1) for broadcasting over CHW.
        a_ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        a_ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        a_ = lambda _UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std

    def lowercase__ ( self , _UpperCAmelCase ):
        """Pad all images to the per-batch max size; returns (batch, sizes)."""
        a_ = tuple(max(_UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) )
        a_ = [im.shape[-2:] for im in images]
        a_ = [
            nn.functional.pad(
                _UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(_UpperCAmelCase , _UpperCAmelCase )
        ]
        return torch.stack(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase )

    def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False ):
        """Preprocess one image or a list; returns (images, sizes, scales_yx)."""
        with torch.no_grad():
            if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                a_ = [images]
            if single_image:
                assert len(_UpperCAmelCase ) == 1
            for i in range(len(_UpperCAmelCase ) ):
                # Convert every entry to a float tensor on the target device.
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        _UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            a_ = torch.tensor([im.shape[:2] for im in images] )
            a_ = self.aug(_UpperCAmelCase )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            a_ = [self.normalizer(_UpperCAmelCase ) for x in images]
            # now pad them to do the following operations
            a_ , a_ = self.pad(_UpperCAmelCase )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            # Ratio between original and resized sizes, later used to rescale boxes.
            a_ = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def lowerCamelCase_ ( boxes , scale_yx ):
    """Scale box coordinates in place and return the mutated tensor.

    x coordinates (columns 0 and 2) are multiplied by ``scale_yx[:, 1]``,
    y coordinates (columns 1 and 3) by ``scale_yx[:, 0]``.

    The original declared the same parameter name twice (a SyntaxError)
    while the body read ``boxes``/``scale_yx``; parameters are renamed to
    match the body.
    """
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def lowerCamelCase_ ( tensor , box_size ):
    """Clamp (x1, y1, x2, y2) boxes in place to an image of ``box_size`` = (height, width).

    The original declared duplicate parameter names (a SyntaxError),
    unpacked both halves of ``box_size`` into one name, and clamped every
    column against the same ambiguous bound; x columns are now bounded by
    the width and y columns by the height, matching the upstream
    ``_clip_box`` helper.
    """
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    # x coordinates are bounded by the width, y coordinates by the height.
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive rod cutting: best revenue for a rod of
    length ``n`` given ``prices[i-1]`` for a piece of length ``i``.

    The obfuscated original gave every function here the same name and
    duplicated parameter names (a SyntaxError); the definitions are renamed
    to match the names their bodies and ``main`` actually call.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        # Try every first-cut length i and recurse on the remainder.
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable
    # from a rod of length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for top_down_cut_rod; memoizes into ``max_rev``."""
    if max_rev[n] >= 0:
        # Already computed.
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up dynamic-programming rod cutting, O(n^2) time."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Raise ValueError if ``n`` is negative or ``prices`` is too short."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 717 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
    """Parse and return the value of the ``-f`` command-line flag.

    The obfuscated original assigned the parser to one local name but read
    another (``parser``/``args`` were undefined — a NameError on every
    call); the locals are renamed so the body is self-consistent.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def _SCREAMING_SNAKE_CASE ( output_dir , split="eval" ):
    """Load ``{split}_results.json`` from *output_dir* and return it as a dict.

    Raises ValueError if the file does not exist.

    Fixes in this revision: the original declared the same parameter name
    twice (a SyntaxError), referenced an undefined ``split``, and passed
    the path string — not the open file object — to ``json.load``.
    """
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Mirror all log output to stdout so test runners capture it.
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class A( UpperCamelCase ):
    """End-to-end smoke tests for the Flax example scripts: each test builds
    a CLI line, patches ``sys.argv`` with it, runs the script's ``main()``
    and checks the metrics written to ``{split}_results.json``.

    NOTE(review): obfuscation collapsed locals to ``lowerCamelCase_`` and
    the ``patch.object``/``get_results`` arguments to ``A_`` — the flow
    mirrors transformers' ``examples/flax`` test suite; confirm against
    upstream before relying on details.
    """

    def a__ ( self : Union[str, Any] ) -> List[str]:
        # Text classification (GLUE/MRPC) via run_flax_glue.
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_flax_glue.main()
            lowerCamelCase_ = get_results(A_ )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )

    @slow
    def a__ ( self : List[str] ) -> Union[str, Any]:
        # Causal language modeling via run_clm_flax.
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_clm_flax.main()
            lowerCamelCase_ = get_results(A_ )
            self.assertLess(result['eval_perplexity'] , 100 )

    @slow
    def a__ ( self : str ) -> Tuple:
        # Summarization (T5/XSum) via run_summarization_flax; checks ROUGE.
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_summarization_flax.main()
            lowerCamelCase_ = get_results(A_ , split='test' )
            self.assertGreaterEqual(result['test_rouge1'] , 10 )
            self.assertGreaterEqual(result['test_rouge2'] , 2 )
            self.assertGreaterEqual(result['test_rougeL'] , 7 )
            self.assertGreaterEqual(result['test_rougeLsum'] , 7 )

    @slow
    def a__ ( self : Optional[int] ) -> str:
        # Masked language modeling via run_mlm_flax.
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_mlm_flax.main()
            lowerCamelCase_ = get_results(A_ )
            self.assertLess(result['eval_perplexity'] , 42 )

    @slow
    def a__ ( self : Optional[Any] ) -> Union[str, Any]:
        # T5 span-corruption pretraining via run_t5_mlm_flax.
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_ta_mlm_flax.main()
            lowerCamelCase_ = get_results(A_ )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )

    @slow
    def a__ ( self : int ) -> Tuple:
        # Token classification (NER/CoNLL) via run_flax_ner.
        # With more than one GPU, more epochs are needed to converge.
        lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_flax_ner.main()
            lowerCamelCase_ = get_results(A_ )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            self.assertGreaterEqual(result['eval_f1'] , 0.3 )

    @slow
    def a__ ( self : str ) -> int:
        # Question answering (SQuAD v2) via run_qa.
        lowerCamelCase_ = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(A_ , 'argv' , A_ ):
            run_qa.main()
            lowerCamelCase_ = get_results(A_ )
            self.assertGreaterEqual(result['eval_f1'] , 30 )
            self.assertGreaterEqual(result['eval_exact'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651 | 0 |
def A ( series : list ) -> bool:
    """Return True if *series* is an arithmetic progression.

    Raises ValueError for non-list input or an empty list; a single-element
    list is trivially arithmetic.

    Fixes in this revision: the original called ``isinstance(x, x)``
    (a TypeError on every call, since the second argument must be a type)
    and mixed two names for the argument (``lowercase__`` vs ``series``).
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    # Every consecutive difference must equal the first one.
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def A ( series : list ) -> float:
    """Return the arithmetic mean of *series*.

    Raises ValueError for non-list input or an empty list.

    Fixes in this revision: the original called ``isinstance(x, x)``
    (a TypeError on every call) and named the parameter inconsistently
    with the body.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCAmelCase__ : List[str] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase_ ( args ):
    """Validate the distillation CLI arguments for mutual consistency.

    Raises AssertionError on any violation. The obfuscated original named
    its parameter ``_snake_case`` while the body read ``args`` (a NameError
    on every call); the parameter is renamed to match the body.
    """
    # Exactly one of the MLM / CLM objectives must be active.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts )
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Only DistilBERT-from-BERT may mix architectures.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config )
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights )

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights must be non-negative and at least one positive.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase_ ( student , args ):
    """Freeze the student's position embeddings for the given student type.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) and collapsed the attribute targets of the assignments
    into a throwaway local; the parameters are renamed to match the body's
    use of ``args``, and the assignments are kept as the same no-op locals
    they were after obfuscation.
    NOTE(review): upstream this sets ``requires_grad = False`` on the
    student's position-embedding weights — restore the real attribute
    targets once the original model API is confirmed.
    """
    if args.student_type == "roberta":
        # placeholder for: position-embedding weight.requires_grad = False
        frozen = False
    elif args.student_type == "gpt2":
        # placeholder for: wpe weight.requires_grad = False
        frozen = False
def lowercase_ ( student , args ):
    """Freeze the student's token-type embeddings (RoBERTa students only).

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) and lost the attribute target of the assignment; the
    parameters are renamed to match the body's use of ``args``, and the
    assignment is kept as the same no-op local it was after obfuscation.
    NOTE(review): upstream this sets ``requires_grad = False`` on the
    token-type-embedding weights — restore the real target once the
    original model API is confirmed.
    """
    if args.student_type == "roberta":
        # placeholder for: token-type-embedding weight.requires_grad = False
        frozen = False
def lowercase_ ( ):
    """Entry point for teacher->student knowledge distillation.

    Parses the CLI arguments, prepares the dump directory, loads the teacher
    tokenizer/model and the student model, builds the sequence dataset, then
    runs a ``Distiller``.

    NOTE(review): automated renaming assigns every local to
    ``SCREAMING_SNAKE_CASE__`` while later statements read the original names
    (``parser``, ``args``, ``tokenizer``, ``student``, ``teacher`` ...), so
    this function cannot execute as written — the original local names must
    be restored.
    """
    SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser(description="""Training""" )
    parser.add_argument("""--force""" ,action="""store_true""" ,help="""Overwrite dump_path if it already exists.""" )
    parser.add_argument(
        """--dump_path""" ,type=_snake_case ,required=_snake_case ,help="""The output directory (log, checkpoints, parameters, etc.)""" )
    parser.add_argument(
        """--data_file""" ,type=_snake_case ,required=_snake_case ,help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" ,)
    parser.add_argument(
        """--student_type""" ,type=_snake_case ,choices=["""distilbert""", """roberta""", """gpt2"""] ,required=_snake_case ,help="""The student type (DistilBERT, RoBERTa).""" ,)
    parser.add_argument("""--student_config""" ,type=_snake_case ,required=_snake_case ,help="""Path to the student configuration.""" )
    parser.add_argument(
        """--student_pretrained_weights""" ,default=_snake_case ,type=_snake_case ,help="""Load student initialization checkpoint.""" )
    parser.add_argument(
        """--teacher_type""" ,choices=["""bert""", """roberta""", """gpt2"""] ,required=_snake_case ,help="""Teacher type (BERT, RoBERTa).""" )
    parser.add_argument("""--teacher_name""" ,type=_snake_case ,required=_snake_case ,help="""The teacher model.""" )
    parser.add_argument("""--temperature""" ,default=2.0 ,type=_snake_case ,help="""Temperature for the softmax temperature.""" )
    parser.add_argument(
        """--alpha_ce""" ,default=0.5 ,type=_snake_case ,help="""Linear weight for the distillation loss. Must be >=0.""" )
    parser.add_argument(
        """--alpha_mlm""" ,default=0.0 ,type=_snake_case ,help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" ,)
    parser.add_argument("""--alpha_clm""" ,default=0.5 ,type=_snake_case ,help="""Linear weight for the CLM loss. Must be >=0.""" )
    parser.add_argument("""--alpha_mse""" ,default=0.0 ,type=_snake_case ,help="""Linear weight of the MSE loss. Must be >=0.""" )
    parser.add_argument(
        """--alpha_cos""" ,default=0.0 ,type=_snake_case ,help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
    parser.add_argument(
        """--mlm""" ,action="""store_true""" ,help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
    parser.add_argument(
        """--mlm_mask_prop""" ,default=0.15 ,type=_snake_case ,help="""Proportion of tokens for which we need to make a prediction.""" ,)
    parser.add_argument("""--word_mask""" ,default=0.8 ,type=_snake_case ,help="""Proportion of tokens to mask out.""" )
    parser.add_argument("""--word_keep""" ,default=0.1 ,type=_snake_case ,help="""Proportion of tokens to keep.""" )
    parser.add_argument("""--word_rand""" ,default=0.1 ,type=_snake_case ,help="""Proportion of tokens to randomly replace.""" )
    parser.add_argument(
        """--mlm_smoothing""" ,default=0.7 ,type=_snake_case ,help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" ,)
    parser.add_argument("""--token_counts""" ,type=_snake_case ,help="""The token counts in the data_file for MLM.""" )
    parser.add_argument(
        """--restrict_ce_to_mask""" ,action="""store_true""" ,help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" ,)
    parser.add_argument(
        """--freeze_pos_embs""" ,action="""store_true""" ,help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" ,)
    parser.add_argument(
        """--freeze_token_type_embds""" ,action="""store_true""" ,help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" ,)
    parser.add_argument("""--n_epoch""" ,type=_snake_case ,default=3 ,help="""Number of pass on the whole dataset.""" )
    parser.add_argument("""--batch_size""" ,type=_snake_case ,default=5 ,help="""Batch size (for each process).""" )
    parser.add_argument(
        """--group_by_size""" ,action="""store_false""" ,help="""If true, group sequences that have similar length into the same batch. Default is true.""" ,)
    parser.add_argument(
        """--gradient_accumulation_steps""" ,type=_snake_case ,default=50 ,help="""Gradient accumulation for larger training batches.""" ,)
    parser.add_argument("""--warmup_prop""" ,default=0.05 ,type=_snake_case ,help="""Linear warmup proportion.""" )
    parser.add_argument("""--weight_decay""" ,default=0.0 ,type=_snake_case ,help="""Weight decay if we apply some.""" )
    parser.add_argument("""--learning_rate""" ,default=5E-4 ,type=_snake_case ,help="""The initial learning rate for Adam.""" )
    parser.add_argument("""--adam_epsilon""" ,default=1E-6 ,type=_snake_case ,help="""Epsilon for Adam optimizer.""" )
    parser.add_argument("""--max_grad_norm""" ,default=5.0 ,type=_snake_case ,help="""Max gradient norm.""" )
    parser.add_argument("""--initializer_range""" ,default=0.02 ,type=_snake_case ,help="""Random initialization range.""" )
    parser.add_argument(
        """--fp16""" ,action="""store_true""" ,help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" ,)
    parser.add_argument(
        """--fp16_opt_level""" ,type=_snake_case ,default="""O1""" ,help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """See details at https://nvidia.github.io/apex/amp.html"""
        ) ,)
    parser.add_argument("""--n_gpu""" ,type=_snake_case ,default=1 ,help="""Number of GPUs in the node.""" )
    parser.add_argument("""--local_rank""" ,type=_snake_case ,default=-1 ,help="""Distributed training - Local rank""" )
    parser.add_argument("""--seed""" ,type=_snake_case ,default=56 ,help="""Random seed""" )
    parser.add_argument("""--log_interval""" ,type=_snake_case ,default=500 ,help="""Tensorboard logging interval.""" )
    parser.add_argument("""--checkpoint_interval""" ,type=_snake_case ,default=4_000 ,help="""Checkpoint interval.""" )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
    sanity_checks(_snake_case )
    # ARGS #
    init_gpu_params(_snake_case )
    set_seed(_snake_case )
    # Only the master process prepares the dump directory and saves run params.
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                # NOTE(review): the message text contains pre-existing typos
                # ("precised wheter", missing space before "Use"); left as-is.
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
                    """ itUse `--force` if you want to overwrite it""" )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
        # SAVE PARAMS #
        logger.info(f'''Param: {args}''' )
        with open(os.path.join(args.dump_path ,"""parameters.json""" ) ,"""w""" ) as f:
            json.dump(vars(_snake_case ) ,_snake_case ,indent=4 )
        git_log(args.dump_path )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = MODEL_CLASSES[args.student_type]
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    SCREAMING_SNAKE_CASE__ : str = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    SCREAMING_SNAKE_CASE__ : int = {}
    # Map each special-token name to its vocabulary id for the distiller.
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        SCREAMING_SNAKE_CASE__ : str = tokenizer.all_special_tokens.index(_snake_case )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''' )
    SCREAMING_SNAKE_CASE__ : List[Any] = special_tok_ids
    SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file ,"""rb""" ) as fp:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = pickle.load(_snake_case )
    if args.mlm:
        # Build MLM sampling probabilities from smoothed token counts.
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts ,"""rb""" ) as fp:
            SCREAMING_SNAKE_CASE__ : List[Any] = pickle.load(_snake_case )
        SCREAMING_SNAKE_CASE__ : Tuple = np.maximum(_snake_case ,1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0 # do not predict special tokens
        SCREAMING_SNAKE_CASE__ : int = torch.from_numpy(_snake_case )
    else:
        SCREAMING_SNAKE_CASE__ : Tuple = None
    SCREAMING_SNAKE_CASE__ : Any = LmSeqsDataset(params=_snake_case ,data=_snake_case )
    logger.info("""Data loader created.""" )
    # STUDENT #
    logger.info(f'''Loading student config from {args.student_config}''' )
    SCREAMING_SNAKE_CASE__ : Tuple = student_config_class.from_pretrained(args.student_config )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = student_model_class.from_pretrained(args.student_pretrained_weights ,config=_snake_case )
    else:
        SCREAMING_SNAKE_CASE__ : Tuple = student_model_class(_snake_case )
    if args.n_gpu > 0:
        student.to(f'''cuda:{args.local_rank}''' )
    logger.info("""Student loaded.""" )
    # TEACHER #
    SCREAMING_SNAKE_CASE__ : str = teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=_snake_case )
    if args.n_gpu > 0:
        teacher.to(f'''cuda:{args.local_rank}''' )
    logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(_snake_case ,_snake_case )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(_snake_case ,_snake_case )
    # SANITY CHECKS #
    # Student and teacher must share vocabulary and geometry for distillation.
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    SCREAMING_SNAKE_CASE__ : int = Distiller(
        params=_snake_case ,dataset=_snake_case ,token_probs=_snake_case ,student=_snake_case ,teacher=_snake_case )
    distiller.train()
    logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
    # The training entry point was renamed to ``lowercase_`` by the
    # obfuscation while this call still said ``main`` (undefined in this
    # module); call the last-defined ``lowercase_`` (the training driver).
    lowercase_()
| 223 | 0 |
from math import sqrt
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
    """Return the sum of the *proper* divisors of ``__lowerCAmelCase``.

    E.g. for 12 this is 1 + 2 + 3 + 4 + 6 = 16; for a prime it is 1.

    Fixes over the previous version: the body read an undefined name ``n``
    instead of the parameter, called ``sqrt`` on every loop iteration, and
    compared an int against a float square root (fragile for large inputs) —
    replaced with exact integer arithmetic via ``math.isqrt``.
    """
    from math import isqrt  # local import keeps this function self-contained

    n = __lowerCAmelCase
    total = 0
    root = isqrt(n)
    for i in range(1, root + 1):
        if n % i == 0:
            # Count each divisor pair (i, n // i) once.
            total += i + n // i
    if root * root == n:
        # Perfect square: the root was added twice above, keep it once.
        total -= root
    # Subtract n itself so only proper divisors remain.
    return total - n
def __lowerCamelCase ( __lowerCAmelCase : int = 10000 ) -> int:
    """Return the sum of all amicable numbers below ``__lowerCAmelCase``
    (Project Euler problem 21).

    A number i is amicable when d(d(i)) == i and d(i) != i, where d is the
    sum of proper divisors.  The previous version called an undefined name
    ``sum_of_divisors`` (lost during renaming) and evaluated it three times
    per candidate; this version is self-contained and evaluates it at most
    twice.
    """
    from math import isqrt

    def _proper_divisor_sum(m: int) -> int:
        # Sum of the proper divisors of m, by exact integer square root.
        root = isqrt(m)
        s = 0
        for d in range(1, root + 1):
            if m % d == 0:
                s += d + m // d
        if root * root == m:
            s -= root  # perfect square: count the root only once
        return s - m  # exclude m itself

    total = 0
    for i in range(1, __lowerCAmelCase):
        partner = _proper_divisor_sum(i)
        if partner != i and _proper_divisor_sum(partner) == i:
            total += i
    return total
if __name__ == "__main__":
    # ``solution`` is not defined in this module; the entry point above was
    # renamed to ``__lowerCamelCase``, so call it by that name.
    print(__lowerCamelCase(int(str(input()).strip())))
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): the obfuscation bound all four module constants to the single
# (repeatedly shadowed) name ``UpperCamelCase`` while the tokenizer class
# below reads them as ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``
# (see the class attributes and ``save_vocabulary``).  Bind the names the
# class actually uses.
logger = logging.get_logger(__name__)

# Generic vocab-file key -> on-disk SentencePiece model file name.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

# Download location of the vocab file for each published GPT-SW3 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

# Maximum model input size (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'AI-Sweden/gpt-sw3-126m': 2048,
    'AI-Sweden/gpt-sw3-350m': 2048,
    'AI-Sweden/gpt-sw3-1.6b': 2048,
    'AI-Sweden/gpt-sw3-6.7b': 2048,
    'AI-Sweden/gpt-sw3-20b': 2048,
}

# Backward-compatible alias matching the final obfuscated binding.
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class _A ( UpperCAmelCase_ ):
    """SentencePiece-based tokenizer for the GPT-SW3 model family.

    NOTE(review): automated renaming collapsed every method name to ``a`` and
    every parameter to ``lowerCamelCase__`` (sometimes duplicated within one
    signature, which is not valid Python), while the method bodies still read
    the original parameter names (``sp_model_kwargs``, ``eos_token``, ...).
    The original names must be restored before this class can run.
    """

    # Class-level metadata consumed by the PreTrainedTokenizer machinery.
    lowercase_ : Tuple = VOCAB_FILES_NAMES
    lowercase_ : str = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : str = ['''input_ids''', '''attention_mask''']

    def __init__( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=False , lowerCamelCase__ : int=False , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : Optional[Any] , ):
        """Resolve model-size-dependent special tokens, load the SentencePiece
        model and compile the non-printing-character cleanup regex."""
        __UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
        __UpperCamelCase : str = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored""" )
            __UpperCamelCase : List[Any] = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        __UpperCamelCase : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token
        __UpperCamelCase : Dict = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk/eos as pad/bos.
            __UpperCamelCase : Dict = unk_token if pad_token is None else pad_token
            __UpperCamelCase : Any = eos_token if bos_token is None else bos_token
        else:
            __UpperCamelCase : List[Any] = """<pad>""" if pad_token is None else pad_token
            __UpperCamelCase : str = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
        __UpperCamelCase : List[str] = do_lower_case
        __UpperCamelCase : List[str] = remove_space
        __UpperCamelCase : Tuple = keep_accents
        __UpperCamelCase : Dict = vocab_file
        __UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCamelCase__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        __UpperCamelCase : Optional[int] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        __UpperCamelCase : str = re.compile(
            f'[{"".join(map(lowerCamelCase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' )

    def __getstate__( self : str ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        __UpperCamelCase : Dict = self.__dict__.copy()
        __UpperCamelCase : List[str] = None
        return state

    def __setstate__( self : Tuple , lowerCamelCase__ : Optional[Any] ):
        """Restore state and re-load the SentencePiece model after unpickling."""
        __UpperCamelCase : Optional[Any] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __UpperCamelCase : Tuple = {}
        __UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def a ( self : Tuple ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )

    def a ( self : Tuple , lowerCamelCase__ : str ):
        """Remove non-printing characters, normalize exotic whitespace to
        ASCII spaces and apply NFC Unicode normalization."""
        __UpperCamelCase : List[str] = self.non_printing_characters_re.sub("""""" , lowerCamelCase__ )
        # Normalize whitespaces
        __UpperCamelCase : int = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        __UpperCamelCase : Dict = unicodedata.normalize("""NFC""" , lowerCamelCase__ )
        return text

    def a ( self : Optional[Any] , lowerCamelCase__ : str , **lowerCamelCase__ : Dict ):
        """Preprocess then tokenize ``text`` into sub-word strings."""
        __UpperCamelCase : Optional[Any] = self.preprocess_text(lowerCamelCase__ )
        return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )

    def a ( self : Dict , lowerCamelCase__ : str ):
        """Convert a token string to its vocabulary id."""
        return self.sp_model.PieceToId(lowerCamelCase__ )

    def a ( self : Optional[int] , lowerCamelCase__ : int ):
        """Convert a vocabulary id back to its token string."""
        return self.sp_model.IdToPiece(lowerCamelCase__ )

    @staticmethod
    def a ( lowerCamelCase__ : str ):
        """Identity pass-through used as the token-to-string hook."""
        return out_string

    def a ( self : Optional[Any] , lowerCamelCase__ : List[str] ):
        """Join tokens into a single string, decoding non-special spans with
        SentencePiece and passing special tokens through verbatim."""
        __UpperCamelCase : Optional[Any] = []
        __UpperCamelCase : Tuple = """"""
        __UpperCamelCase : Union[str, Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowerCamelCase__ ) + token
                __UpperCamelCase : int = True
                __UpperCamelCase : int = []
            else:
                current_sub_tokens.append(lowerCamelCase__ )
                __UpperCamelCase : Dict = False
        out_string += self.sp_model.decode(lowerCamelCase__ )
        return out_string

    def a ( self : Optional[Any] ):
        """Return the full token->id mapping, including added tokens."""
        __UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def a ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
        """Write (or copy) the SentencePiece model into ``save_directory``;
        returns a one-tuple with the written file path."""
        if not os.path.isdir(lowerCamelCase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        __UpperCamelCase : List[str] = os.path.join(
            lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk: serialize the in-memory model instead.
            with open(lowerCamelCase__ , """wb""" ) as fi:
                __UpperCamelCase : Dict = self.sp_model.serialized_model_proto()
                fi.write(lowerCamelCase__ )
        return (out_vocab_file,)

    def a ( self : List[str] , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : Union[str, bool] = False ):
        """Encode raw text (or a list of texts) straight to ids, optionally
        returning a torch tensor when ``return_tensors`` is True or 'pt'."""
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            __UpperCamelCase : List[Any] = self.preprocess_text(lowerCamelCase__ )
            __UpperCamelCase : Optional[int] = self.sp_model.encode(lowerCamelCase__ )
        else:
            __UpperCamelCase : Any = [self.preprocess_text(lowerCamelCase__ ) for t in text]
            __UpperCamelCase : Optional[int] = self.sp_model.encode(lowerCamelCase__ )
        if return_tensors is True or return_tensors == "pt":
            __UpperCamelCase : str = torch.tensor(lowerCamelCase__ )
        return token_ids

    def a ( self : Optional[int] , lowerCamelCase__ : Union[int, List[int]] ):
        """Decode ids straight back to text via SentencePiece."""
        return self.sp_model.decode(lowerCamelCase__ )

    def a ( self : Optional[Any] , lowerCamelCase__ : "Conversation" ):
        """Serialize a ``Conversation`` into the ``User:``/``Bot:`` prompt
        format used by GPT-SW3 and encode it."""
        __UpperCamelCase : Tuple = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        __UpperCamelCase : Any = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCamelCase__ ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=lowerCamelCase__ )
| 515 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__A = logging.getLogger(__name__)
@dataclass
class _A ( TrainingArguments ):
    """Extra command-line arguments for sequence-to-sequence fine-tuning, on
    top of the base ``TrainingArguments``.

    NOTE(review): the obfuscation had collapsed every field name to
    ``lowerCamelCase`` (so only the last field survived in the dataclass) and
    every non-literal default to ``UpperCamelCase`` (an undefined, non-field
    value); the field names and defaults below are reconstructed from each
    field's ``metadata['help']`` string.
    """

    # Label-smoothing epsilon for the loss (0.0 disables it).
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    # Group samples of similar length to reduce padding.
    sortish_sampler : bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    # Run generation during evaluation to compute ROUGE/BLEU.
    predict_with_generate : bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor : bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    # The dropout overrides below are copied into model.config when set.
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout : Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    # Must be one of the schedulers registered in seq2seq_trainer.arg_to_scheduler.
    lr_scheduler : Optional[str] = field(
        default='linear' , metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 68 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __magic_name__ ( A__, unittest.TestCase ):
    """Tests for ``BertJapaneseTokenizer`` with word-level backends
    (MeCab, Sudachi, Juman++) and a wordpiece sub-word tokenizer.

    NOTE(review): automated renaming collapsed every test method name to
    ``SCREAMING_SNAKE_CASE_``, so each definition shadows the previous one and
    only the last method survives on the class; the original ``test_*`` names
    must be restored for the suite to run.
    """

    lowercase : Dict =BertJapaneseTokenizer
    lowercase : Union[str, Any] =False
    lowercase : List[str] =True

    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
        '''Write a tiny Japanese wordpiece vocabulary file into the tmp dir.'''
        super().setUp()
        UpperCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> str:
        '''Return a (raw input, expected tokenized output) text pair.'''
        UpperCAmelCase = "こんにちは、世界。 \nこんばんは、世界。"
        UpperCAmelCase = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : int ) -> List[Any]:
        '''Round-trip the fixture text through the tokenizer; return (text, ids).'''
        UpperCAmelCase , UpperCAmelCase = self.get_input_output_texts(UpperCamelCase__ )
        UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        UpperCAmelCase = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        return text, ids

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
        '''Intentionally skipped for this tokenizer.'''
        pass # TODO add if relevant

    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
        '''Intentionally skipped for this tokenizer.'''
        pass # TODO add if relevant

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
        '''Intentionally skipped for this tokenizer.'''
        pass # TODO add if relevant

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
        '''Full tokenizer: word split + wordpiece, then token -> id conversion.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file )
        UpperCAmelCase = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple:
        '''MeCab word tokenizer: tokenization plus pickle round-trip.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(UpperCamelCase__ )
        UpperCAmelCase = "こんにちは、世界。\nこんばんは、世界。"
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        UpperCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , "rb" ) as handle:
            UpperCAmelCase = pickle.load(UpperCamelCase__ )
        UpperCAmelCase = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
        '''MeCab with the ipadic dictionary.'''
        UpperCAmelCase = MecabTokenizer(mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
        '''MeCab with the unidic_lite dictionary (skipped if unavailable).'''
        try:
            UpperCAmelCase = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]:
        '''MeCab with the unidic dictionary (skipped if unavailable).'''
        try:
            UpperCAmelCase = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
        '''MeCab with lowercasing enabled.'''
        UpperCAmelCase = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
        '''MeCab with an explicit jumandic option (skipped if dict missing).'''
        try:
            UpperCAmelCase = MecabTokenizer(
                do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
        '''MeCab with text normalization disabled.'''
        UpperCAmelCase = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
        '''Sudachi word tokenizer: tokenization plus pickle round-trip.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(UpperCamelCase__ )
        UpperCAmelCase = "こんにちは、世界。\nこんばんは、世界。"
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        UpperCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , "rb" ) as handle:
            UpperCAmelCase = pickle.load(UpperCamelCase__ )
        UpperCAmelCase = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
        '''Sudachi with the core dictionary (whitespace is preserved).'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
        '''Sudachi split mode A (shortest units).'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
        '''Sudachi split mode B (middle units).'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
        '''Sudachi split mode C (longest units).'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
        '''Sudachi with lowercasing enabled.'''
        UpperCAmelCase = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
        '''Sudachi with text normalization disabled.'''
        UpperCAmelCase = SudachiTokenizer(normalize_text=UpperCamelCase__ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
        '''Sudachi with whitespace trimming enabled.'''
        UpperCAmelCase = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
        '''Juman++ word tokenizer: tokenization plus pickle round-trip.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(UpperCamelCase__ )
        UpperCAmelCase = "こんにちは、世界。\nこんばんは、世界。"
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        UpperCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , "rb" ) as handle:
            UpperCAmelCase = pickle.load(UpperCamelCase__ )
        UpperCAmelCase = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
        '''Juman++ default behaviour (ideographic spaces preserved as \\u3000).'''
        UpperCAmelCase = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
        '''Juman++ with lowercasing enabled.'''
        UpperCAmelCase = JumanppTokenizer(do_lower_case=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
        '''Juman++ with text normalization disabled (half-width kana kept split).'''
        UpperCAmelCase = JumanppTokenizer(normalize_text=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
        '''Juman++ with whitespace trimming enabled.'''
        UpperCAmelCase = JumanppTokenizer(trim_whitespace=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[str]:
        '''Juman++ keeps emoticon-like sequences together.'''
        UpperCAmelCase = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Any:
        '''WordpieceTokenizer: greedy longest-match with [UNK] fallback.'''
        UpperCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        UpperCAmelCase = {}
        for i, token in enumerate(UpperCamelCase__ ):
            UpperCAmelCase = i
        UpperCAmelCase = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
        '''SentencePiece sub-word backend of the auto-jumanpp checkpoint.'''
        UpperCAmelCase = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        UpperCAmelCase = tokenizer.subword_tokenizer
        UpperCAmelCase = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(UpperCamelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        UpperCAmelCase = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(UpperCamelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
        '''build_inputs_with_special_tokens wraps sequences in [CLS]/[SEP].'''
        UpperCAmelCase = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        UpperCAmelCase = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase__ )
        UpperCAmelCase = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase__ )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer with character-level subword tokenization.

    BUG FIX: the class previously inherited from an undefined name, stored the
    tokenizer class in an attribute that shadowed a duplicate, and used mangled
    method names that both collided with each other and hid the TokenizerTesterMixin
    hooks (setUp / get_tokenizer / get_input_output_texts) from the framework.
    """

    # `self.tokenizer_class` is read below and by TokenizerTesterMixin.
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        # Build the token -> id mapping the tokenizer expects (previously discarded).
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """AutoTokenizer resolves the bert-japanese checkpoint to BertJapaneseTokenizer."""

    def test_tokenizer_bert_japanese(self):
        # BUG FIX: the checkpoint id and tokenizer were bound to throwaway names,
        # and the isinstance check compared an object with itself.
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """Loading a checkpoint with a tokenizer class of a different type logs a warning."""

    def test_tokenizer_mismatch_warning(self):
        # BUG FIX: checkpoint ids were bound to throwaway names while the
        # from_pretrained calls read them; class renamed to avoid the
        # module-level name collision with the other mangled test classes.
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 323 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase : Union[str, Any] = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save *model* to *dirpath* via `save_pretrained`.

    If *dirpath* already holds a `config.json` / `pytorch_model.bin`, the stale
    files are removed first; otherwise the directory is created.

    BUG FIX: the original definition used the same mangled name for both
    parameters (a SyntaxError) and a mangled function name, while the call site
    below invokes `save_model(model, args.output_dir)`.
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Shannon entropy of distribution(s) *p* along the last dimension.

    If *unlogit* is True, *p* is squared first (treating the input as an
    amplitude rather than a probability). Zero-probability entries contribute
    0 instead of `0 * log(0) = nan`.

    BUG FIX: restores the mangled bindings — the exponent, the squared
    distribution, the `plogp` product and the zero-masking assignment were all
    written to the same throwaway name.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) is nan; define it as 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row: floats with 5 decimals, integer tensors as-is.

    BUG FIX: the parameter was a mangled name while the body reads `tensor`,
    and the function name is restored to match the call sites
    (`print_ad_tensor(attn_entropy)` etc.).
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores
    (importance as in Michel et al., http://arxiv.org/abs/1905.10650).

    Returns (attn_entropy, head_importance, total_loss).

    BUG FIX: restores the mangled bindings — the layer/head tuple unpack, the
    per-batch `(input_ids,)` unpack, the outputs unpack and the rank
    index-assignment had all lost their left-hand sides.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): descending sort so rank 0 is the most important head —
    # matches the original bertology script; confirm against its upstream version.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the LM score drops
    below `masking_threshold * original_score`; returns the final head mask.

    BUG FIX: restores the mangled bindings — in particular the two masked
    index-assignments (`head_importance[head_mask == 0.0] = inf` and
    `new_head_mask[current_heads_to_mask] = 0.0`) had lost their left-hand sides.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the heads selected by *head_mask* (pruning, not just
    masking), then compare score and timing before/after and save the model.

    BUG FIX: restores the mangled bindings (scores, timings, param counts, the
    per-layer heads_to_prune mapping) and the `isinstance(v, int)` check that
    normalizes single-head entries into lists.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """Entry point: parse args, set up device/logging, load GPT-2, build the
    dataloader, then compute head importance and optionally mask/prune heads.

    BUG FIX: restores the mangled bindings (`args`, `args.device`, `args.n_gpu`,
    `model`, the dataset pipeline) and fixes the mangled dtype `np.intaa`
    (digits were scrambled) back to `np.int64`.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
# Standard script entry-point guard.
# BUG FIX: stray dataset-separator tokens (`| 711 |`) appended to the call
# line made it a SyntaxError; they are removed here.
if __name__ == "__main__":
    main()
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( *a :Union[str, Any] , **a :int ) -> str:
pass
def hashimage(image) -> str:
    """Return the MD5 hex digest of the image's raw bytes — a stable fingerprint
    used to compare pipeline outputs in tests.

    BUG FIX: `hashlib.mda` does not exist (a digit-mangled `md5`); the function
    name is restored to match its call site (`hashimage(outputs["depth"])`).
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    """Pipeline tests for depth estimation.

    BUG FIX: locals were bound to throwaway mangled names while the assertions
    read `depth_estimator`/`outputs`, the mapping attribute is restored to the
    `model_mapping` name the pipeline test harness reads, duplicate mangled
    method names no longer shadow each other, and dataset-separator residue on
    the final line is removed.
    """

    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ : Any = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) to resize *input_image* to.

    Scales toward *output_size*, optionally preserving aspect ratio (scaling as
    little as possible), and constrains each dimension to a multiple of
    *multiple*.

    BUG FIX: the outer function had four identically-named parameters (a
    SyntaxError) and the inner helper assigned its result to a mangled name
    while the following lines read `x`; real names are restored from the call
    site (`get_resize_output_image_size(image, output_size=..., keep_aspect_ratio=..., multiple=...)`).
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # round to the nearest multiple, then fall back to floor/ceil to stay
        # inside [min_val, max_val]
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor: optionally resizes images (with
    aspect-ratio / multiple-of constraints), rescales pixel values and
    normalizes them, producing model-ready `pixel_values`.

    BUG FIX: every `self.*` attribute assignment in `__init__` and every local
    in the other methods had been collapsed onto a single mangled name, leaving
    the attributes the methods read (`self.do_resize`, `self.size`, ...) unset;
    the base class is restored to `BaseImageProcessor` (the previous base name
    was undefined).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size`, honoring the aspect-ratio and multiple-of constraints."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        # `resize` here resolves to the module-level image_transforms helper,
        # not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare a batch of images: resize -> rescale -> normalize -> BatchFeature.

        Any argument left as None falls back to the value configured on the
        processor instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # BUG FIX: the `or` is parenthesized — without parentheses, `and`
        # binds tighter and `resample is None` alone triggered the error.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model `outputs` into per-image semantic segmentation maps,
        optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 622 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__( TokenizerTesterMixin , unittest.TestCase ):
    """Unit tests for the CTRL BPE tokenizer.

    Fixes vs. previous revision: the base class was an undefined name
    (now the imported ``TokenizerTesterMixin``), the three class attributes
    were all named ``A`` (shadowing each other), and the four methods all
    shared one name so ``setUp`` was never found by unittest.
    """

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self) -> None:
        """Write a tiny vocab/merges pair to the temp dir for the tests."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self , **kwargs) -> CTRLTokenizer:
        """Build a tokenizer from the files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs)

    def get_input_output_texts(self , tokenizer):
        """Return an (input, expected-decoded-output) pair for the mixin."""
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self) -> None:
        """Tokenization and token->id conversion against the toy vocab."""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 622 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class A__ ( SchedulerMixin , ConfigMixin ):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.

    See "Pseudo Numerical Methods for Diffusion Models on Manifolds"
    (https://arxiv.org/pdf/2202.09778.pdf), mainly formulas (9), (12), (13)
    and Algorithm 2.

    Fixes vs. previous revision: duplicate parameter names (SyntaxErrors),
    undefined ``UpperCamelCase_`` references, four methods all named
    ``lowercase`` shadowing each other (``self.set_timesteps`` /
    ``self._get_prev_sample`` call sites ground the restored names),
    ``torch.atana`` -> ``torch.atan2`` and ``torch.floataa`` -> ``float32``.
    """

    # Solver order exposed to pipelines (diffusers scheduler API convention).
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , trained_betas=None ) -> None:
        # `num_train_timesteps` and `trained_betas` are stored on self.config
        # by @register_to_config; trained_betas is read in set_timesteps.
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values (history of epsilon predictions for the multistep formula)
        self.ets = []

    def set_timesteps( self , num_inference_steps: int , device=None ) -> None:
        """Set the discrete timesteps used for the diffusion chain.

        Args:
            num_inference_steps: number of denoising steps.
            device: optional device to move the timesteps tensor to.
        """
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )

        # Reset the multistep history whenever the schedule changes.
        self.ets = []

    def step( self , model_output , timestep , sample , return_dict: bool = True ):
        """Propagate the sample one step backward through the diffusion process.

        Args:
            model_output: direct output of the learned diffusion model.
            timestep: current discrete timestep (must be in self.timesteps).
            sample: current noisy sample.
            return_dict: when False, return a plain tuple.

        Returns:
            ``SchedulerOutput`` (or 1-tuple) with the previous sample.

        Raises:
            ValueError: if ``set_timesteps`` has not been run.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )

        # Linear multistep coefficients (Adams-Bashforth up to order 4).
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self , sample , *args , **kwargs ):
        """IPNDM needs no input scaling; return the sample unchanged."""
        return sample

    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        """One solver sub-step: denoise with the current (alpha, sigma) pair
        and re-noise at the previous-timestep pair."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # max(alpha, 1e-8) guards the division at the final (alpha -> 0) step.
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__( self ) -> int:
        return self.config.num_train_timesteps
| 714 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both constants share one obfuscated name, so the second
# assignment shadows the first and `lowercase_` is 32 everywhere below.
# Presumably these were MAX_GPU_BATCH_SIZE (16) and EVAL_BATCH_SIZE (32) —
# confirm against the upstream script before renaming.
lowercase_ = 16
lowercase_ = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased") ->Tuple:
    """Build GLUE/MRPC train and eval dataloaders for `model_name`.

    Args:
        accelerator: the ``Accelerator`` (used only to pick TPU-safe padding).
        batch_size: training batch size.
        model_name: tokenizer checkpoint name or path.

    Returns:
        ``(train_dataloader, eval_dataloader)``.

    Fixes vs. previous revision: the signature declared three parameters with
    the same name (SyntaxError) and the body referenced locals that were never
    defined (`tokenizer`, `datasets`, ...); the function is renamed to match
    its call site in ``training_function``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders. `lowercase_` is the module-level eval batch
    # size constant (32) defined at the top of the file.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=lowercase_)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric) ->float:
    """Run one pass over the eval dataloader and return the accuracy.

    Deduplicates the final gathered batch in distributed runs so the metric
    sees each validation sample exactly once.

    Fixes vs. previous revision: four identically-named parameters
    (SyntaxError) and undefined locals; renamed to match its call sites in
    ``training_function``.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['''labels''']))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args) ->None:
    """Train (or resume and verify) an MRPC classifier, checkpointing per epoch.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace (model_name_or_path, output_dir,
            resume_from_checkpoint, partial_train_epoch, num_epochs).

    When ``args.resume_from_checkpoint`` is set, the restored scheduler /
    optimizer / accuracy are checked against the json snapshot written when
    the checkpoint was saved, then the function returns without training.

    Fixes vs. previous revision: duplicate parameter names (SyntaxError) and
    every local collapsed to ``__magic_name__``; renamed to match the call in
    ``main``.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: use the DeepSpeed dummy when the DS config owns the optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when the DeepSpeed config owns it)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('''glue''', '''mrpc''')
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Checkpoint folders are named "epoch_<n>"; recover <n> from the path.
        epoch_string = args.resume_from_checkpoint.split('''epoch_''')[1]
        state_epoch_num = ''''''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        # Sanity-check the restored state against the json written at save time.
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('''resumed checkpoint performance:''', accuracy)
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0])
        accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''])
        with open(os.path.join(args.output_dir, F'''state_{starting_epoch-1}.json'''), '''r''') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        # Save a per-epoch checkpoint plus the json snapshot the resume path reads.
        output_dir = F'''epoch_{epoch}'''
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]['''lr''']
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(F'''epoch {epoch}:''', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, F'''state_{epoch}.json'''), '''w''') as f:
                json.dump(state, f)
def main() ->None:
    """Parse CLI arguments and launch the training/checkpointing run.

    Fixes vs. previous revision: the parser/args locals were undefined
    (everything was assigned to ``__magic_name__``), ``type=``/``default=``/
    ``required=`` values were lost, and the ``__main__`` guard called
    ``main()`` while the function had a different name.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=False,
    )
    parser.add_argument(
        '''--output_dir''',
        type=str,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''',
    )
    parser.add_argument(
        '''--resume_from_checkpoint''',
        type=str,
        default=None,
        help='''If the training should continue from a checkpoint folder.''',
    )
    parser.add_argument(
        '''--partial_train_epoch''',
        type=int,
        default=None,
        help='''If passed, the training will stop after this number of epochs.''',
    )
    parser.add_argument(
        '''--num_epochs''',
        type=int,
        default=2,
        help='''Number of train epochs.''',
    )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 336 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
# Module-level logger for the conversion script (obfuscated name; presumably
# this was `logger` upstream — it is not referenced in the visible code).
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build a YolosConfig for the named checkpoint variant.

    Args:
        yolos_name: one of 'yolos_ti', 'yolos_s_200_pre', 'yolos_s_300_pre',
            'yolos_s_dWr', 'yolos_base' (matched by substring below).

    Fixes vs. previous revision: every config attribute assignment collapsed
    to ``_A = ...`` (nothing was stored on the config), and the id2label
    comprehension called ``int`` on the wrong variable; renamed to match its
    call site in ``convert_yolos_checkpoint``.
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    # COCO detection label space.
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def SCREAMING_SNAKE_CASE_ ( _snake_case :dict , _snake_case :YolosConfig , _snake_case :bool = False ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[: config.hidden_size, :]
_A = in_proj_bias[: config.hidden_size]
_A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_A = in_proj_weight[-config.hidden_size :, :]
_A = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map one timm YOLOS state-dict key to its HF Transformers equivalent.

    Fixes vs. previous revision: each replacement was assigned to ``_A``
    while the conditions kept testing the untouched ``name``, so the
    replacements never chained; the function also shared its name with every
    other helper in this file.
    """
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''')
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''')
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''')
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''')
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''')
    # "attn.proj" must be handled before the bare "attn" substitution below.
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''')
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''')
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''')
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''')

    return name
def convert_state_dict(orig_state_dict: dict , model: YolosForObjectDetection) -> dict:
    """Rewrite a timm YOLOS checkpoint into HF naming, in place.

    Fused qkv tensors are split using the head size read from `model`; all
    other keys are passed through ``rename_key``.

    Fixes vs. previous revision: duplicate parameter names (SyntaxError),
    split q/k/v destination keys lost to ``_A = ...``, and non-qkv keys were
    dropped instead of being re-inserted under their renamed key; renamed to
    match its call site in ``convert_yolos_checkpoint``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> "Image.Image":
    """Download the standard COCO cats test image used for output checks.

    Fixes vs. previous revision: the URL was assigned to ``_A`` while the
    request used an undefined ``url``; renamed to match its call site in
    ``convert_yolos_checkpoint``.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False) -> None:
    """Convert an original YOLOS checkpoint to an HF model and verify outputs.

    Args:
        yolos_name: variant name (see ``get_yolos_config``).
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: when True, also push model + processor to the hub.

    Raises:
        ValueError: for an unknown ``yolos_name``.

    Fixes vs. previous revision: duplicate parameter names (SyntaxError) and
    all locals collapsed to ``_A``; the expected output slices are preserved
    verbatim.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''')['''model''']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size)
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(F'''Unknown yolos_name: {yolos_name}''')

    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''')
        model.push_to_hub(model_name , organization='''hustvl''')
if __name__ == "__main__":
    # Fixed: the parser was assigned to `UpperCAmelCase_` while the
    # `add_argument`/`parse_args` calls used the undefined name `parser`,
    # and the parsed namespace was referenced as the undefined `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--yolos_name""",
        default="""yolos_s_200_pre""",
        type=str,
        help=(
            """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
            """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
        ),
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 2 |
'''simple docstring'''
def nand_gate(input_1: int , input_2: int) -> int:
    '''Return the NAND of two binary inputs: 1 unless both inputs are 1.

    Fixes vs. previous revision: both parameters shared one name
    (SyntaxError) and the body referenced `input_a` twice; renamed to match
    the call sites in the self-test and the __main__ block below.
    '''
    # NAND is false only when neither input is 0.
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    '''Exhaustively check the NAND truth table.

    Renamed from the obfuscated name that shadowed `nand_gate`'s own
    identifier; the body already exercises `nand_gate`.
    '''
    assert nand_gate(0 , 0) == 1
    assert nand_gate(0 , 1) == 1
    assert nand_gate(1 , 0) == 1
    assert nand_gate(1 , 1) == 0
if __name__ == "__main__":
    # Print the full NAND truth table when run as a script.
    # Fixed: a stray "| 125 | 0 |" extraction artifact fused onto the last
    # line made the module unparseable.
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n) -> bool:
    """Return True if the string form of *n* reads the same forwards and backwards.

    Fixes vs. previous revision: the string was bound to an obfuscated local
    while the return statement used the undefined name ``n``; renamed to
    match the call sites in ``solution``.
    """
    s = str(n)
    return s == s[::-1]
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Sum all numbers below *limit* that are palindromic in base 10 and base 2.

    (Project Euler problem 36; ``bin(i)`` yields "0b..." so the prefix is
    stripped with ``split('b')``.)

    Fixes vs. previous revision: the accumulator was bound to an obfuscated
    name while ``total += i`` used the undefined ``total``, and both
    palindrome checks were passed the *limit* instead of the loop variable;
    renamed to match the call in the __main__ block.
    """
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
    # Read an upper limit from stdin and print the Euler-36 sum below it.
    # (The inner str() is redundant — input() already returns a str.)
    print(solution(int(str(input().strip()))))
| 721 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Drop dot-separated segments from a checkpoint key.

    A non-negative count removes that many segments from the front; a
    negative count removes ``abs(count)`` segments from the end.

    Fixes vs. previous revision: both parameters shared one name
    (SyntaxError) and the body used the undefined ``path`` /
    ``n_shave_prefix_segments``; renamed to match its call sites in the
    ``renew_*_paths`` helpers.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.')[n_shave_prefix_segments:])
    else:
        return ".".join(path.split('.')[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM resnet keys to diffusers names; return [{'old', 'new'}, ...].

    Fixes vs. previous revision: duplicate parameter names (SyntaxError) and
    a broken replace chain (every result was bound to one obfuscated name
    while subsequent calls read the original, undefined, ``new_item``).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')

        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')

        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({'old': old_item, 'new': new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM attention keys to diffusers names; return [{'old', 'new'}, ...].

    Fixes vs. previous revision: duplicate parameter names (SyntaxError) and
    the same broken replace chain as ``renew_resnet_paths``.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')

        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({'old': old_item, 'new': new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Copy tensors from `old_checkpoint` into `checkpoint` under renamed keys.

    Args:
        paths: list of {'old': ..., 'new': ...} key mappings.
        checkpoint: destination state dict (mutated in place).
        old_checkpoint: source state dict.
        attention_paths_to_split: optional {old_key: {'query','key','value'}
            destination map} for fused qkv tensors, which are split three ways.
        additional_replacements: optional extra {'old','new'} substring swaps
            applied to every destination key.
        config: must provide 'num_head_channels' when splitting attention.

    Fixes vs. previous revision: six identically-named parameters
    (SyntaxError) and every local collapsed to one obfuscated name.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)

    for path in paths:
        new_path = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
    """Convert an LDM-style UNet state dict into the diffusers block layout.

    Source keys use the LDM naming visible below (``time_embed``,
    ``input_blocks``, ``middle_block``, ``output_blocks``); destination keys use
    the diffusers naming (``down_blocks``, ``mid_block``, ``up_blocks``).

    NOTE(review): this body is name-mangled and cannot run as written — the
    parameter list repeats one identifier (a SyntaxError), every local is
    rebound to the same name ``SCREAMING_SNAKE_CASE``, and later lines read
    names that are never defined here (``checkpoint``, ``config``,
    ``input_blocks``, ``middle_blocks``, ``output_blocks``, ``attentions``,
    ``resnets``, ``to_split``, ``resnet_0_paths``, ``new_checkpoint`` ...).
    It also calls helpers by their original names (``renew_resnet_paths``,
    ``renew_attention_paths``, ``assign_to_checkpoint``, ``shave_segments``)
    that are not defined under those names in this file.  The original
    signature was presumably ``(checkpoint, config)`` — restore the bindings
    before use.
    """
    # New diffusers-layout state dict under construction (binding target lost).
    SCREAMING_SNAKE_CASE = {}
    # Time embedding and conv in/out layers map one-to-one.
    SCREAMING_SNAKE_CASE = checkpoint['time_embed.0.weight']
    SCREAMING_SNAKE_CASE = checkpoint['time_embed.0.bias']
    SCREAMING_SNAKE_CASE = checkpoint['time_embed.2.weight']
    SCREAMING_SNAKE_CASE = checkpoint['time_embed.2.bias']
    SCREAMING_SNAKE_CASE = checkpoint['input_blocks.0.0.weight']
    SCREAMING_SNAKE_CASE = checkpoint['input_blocks.0.0.bias']
    SCREAMING_SNAKE_CASE = checkpoint['out.0.weight']
    SCREAMING_SNAKE_CASE = checkpoint['out.0.bias']
    SCREAMING_SNAKE_CASE = checkpoint['out.2.weight']
    SCREAMING_SNAKE_CASE = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    SCREAMING_SNAKE_CASE = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    SCREAMING_SNAKE_CASE = {
        layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
        for layer_id in range(SCREAMING_SNAKE_CASE_ )
    }
    # Retrieves the keys for the middle blocks only
    SCREAMING_SNAKE_CASE = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    SCREAMING_SNAKE_CASE = {
        layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
        for layer_id in range(SCREAMING_SNAKE_CASE_ )
    }
    # Retrieves the keys for the output blocks only
    SCREAMING_SNAKE_CASE = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    SCREAMING_SNAKE_CASE = {
        layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
        for layer_id in range(SCREAMING_SNAKE_CASE_ )
    }
    # Down blocks: block 0 is the conv_in handled above, hence range starts at 1.
    for i in range(1, SCREAMING_SNAKE_CASE_ ):
        SCREAMING_SNAKE_CASE = (i - 1) // (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE = (i - 1) % (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
        SCREAMING_SNAKE_CASE = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
        # An `op` sub-module marks a downsampling conv rather than a resnet.
        if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
            SCREAMING_SNAKE_CASE = checkpoint[
                f'''input_blocks.{i}.0.op.weight'''
            ]
            SCREAMING_SNAKE_CASE = checkpoint[
                f'''input_blocks.{i}.0.op.bias'''
            ]
            continue
        SCREAMING_SNAKE_CASE = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE = {'old': f'''input_blocks.{i}.0''', 'new': f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
        SCREAMING_SNAKE_CASE = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, additional_replacements=[meta_path, resnet_op], config=SCREAMING_SNAKE_CASE_ )
        # Any attention layers in this input block get their fused qkv split.
        if len(SCREAMING_SNAKE_CASE_ ):
            SCREAMING_SNAKE_CASE = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
            SCREAMING_SNAKE_CASE = {
                'old': f'''input_blocks.{i}.1''',
                'new': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
            }
            SCREAMING_SNAKE_CASE = {
                f'''input_blocks.{i}.1.qkv.bias''': {
                    'key': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                    'query': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                    'value': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                },
                f'''input_blocks.{i}.1.qkv.weight''': {
                    'key': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                    'query': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                    'value': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                },
            }
            assign_to_checkpoint(
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, additional_replacements=[meta_path], attention_paths_to_split=SCREAMING_SNAKE_CASE_, config=SCREAMING_SNAKE_CASE_, )
    # Middle block: resnet / attention / resnet.
    SCREAMING_SNAKE_CASE = middle_blocks[0]
    SCREAMING_SNAKE_CASE = middle_blocks[1]
    SCREAMING_SNAKE_CASE = middle_blocks[2]
    SCREAMING_SNAKE_CASE = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
    assign_to_checkpoint(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, config=SCREAMING_SNAKE_CASE_ )
    SCREAMING_SNAKE_CASE = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
    assign_to_checkpoint(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, config=SCREAMING_SNAKE_CASE_ )
    SCREAMING_SNAKE_CASE = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
    SCREAMING_SNAKE_CASE = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, attention_paths_to_split=SCREAMING_SNAKE_CASE_, config=SCREAMING_SNAKE_CASE_ )
    # Up blocks.
    for i in range(SCREAMING_SNAKE_CASE_ ):
        SCREAMING_SNAKE_CASE = i // (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE = i % (config['num_res_blocks'] + 1)
        SCREAMING_SNAKE_CASE = [shave_segments(SCREAMING_SNAKE_CASE_, 2 ) for name in output_blocks[i]]
        SCREAMING_SNAKE_CASE = {}
        # Group this output block's keys by their layer index within the block.
        for layer in output_block_layers:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = layer.split('.' )[0], shave_segments(SCREAMING_SNAKE_CASE_, 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ )
            else:
                SCREAMING_SNAKE_CASE = [layer_name]
        if len(SCREAMING_SNAKE_CASE_ ) > 1:
            SCREAMING_SNAKE_CASE = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
            SCREAMING_SNAKE_CASE = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]
            SCREAMING_SNAKE_CASE = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
            SCREAMING_SNAKE_CASE = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
            SCREAMING_SNAKE_CASE = {'old': f'''output_blocks.{i}.0''', 'new': f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
            assign_to_checkpoint(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, additional_replacements=[meta_path], config=SCREAMING_SNAKE_CASE_ )
            # A `conv` layer in this block is the upsampler, not an attention.
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                SCREAMING_SNAKE_CASE = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
                SCREAMING_SNAKE_CASE = checkpoint[
                    f'''output_blocks.{i}.{index}.conv.weight'''
                ]
                SCREAMING_SNAKE_CASE = checkpoint[
                    f'''output_blocks.{i}.{index}.conv.bias'''
                ]
                # Clear attentions as they have been attributed above.
                if len(SCREAMING_SNAKE_CASE_ ) == 2:
                    SCREAMING_SNAKE_CASE = []
            if len(SCREAMING_SNAKE_CASE_ ):
                SCREAMING_SNAKE_CASE = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
                SCREAMING_SNAKE_CASE = {
                    'old': f'''output_blocks.{i}.1''',
                    'new': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
                }
                SCREAMING_SNAKE_CASE = {
                    f'''output_blocks.{i}.1.qkv.bias''': {
                        'key': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                        'query': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                        'value': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                    },
                    f'''output_blocks.{i}.1.qkv.weight''': {
                        'key': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                        'query': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                        'value': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                    },
                }
                assign_to_checkpoint(
                    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, additional_replacements=[meta_path], attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None, config=SCREAMING_SNAKE_CASE_, )
        else:
            # Single-layer output block: plain resnet rename without helpers.
            SCREAMING_SNAKE_CASE = renew_resnet_paths(SCREAMING_SNAKE_CASE_, n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                SCREAMING_SNAKE_CASE = '.'.join(['output_blocks', str(SCREAMING_SNAKE_CASE_ ), path['old']] )
                SCREAMING_SNAKE_CASE = '.'.join(['up_blocks', str(SCREAMING_SNAKE_CASE_ ), 'resnets', str(SCREAMING_SNAKE_CASE_ ), path['new']] )
                SCREAMING_SNAKE_CASE = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry point: convert an LDM UNet checkpoint to the diffusers format.
    # (Bindings restored: the mangled original assigned everything to one name,
    # leaving `parser`, `args`, `checkpoint`, `config`, `model` ... undefined.)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    # "ldm" is a checkpoint-format marker, not a model-config key.
    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        # Try to assemble a full LDM pipeline; the scheduler and VQ-VAE are
        # expected to live in the same directory as the UNet checkpoint.
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except Exception:
        # Pipeline parts missing or failed to load: fall back to the bare UNet.
        # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
        model.save_pretrained(args.dump_path)
| 406 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
class __lowercase (_UpperCAmelCase ):
    """Speech feature extractor producing raw-waveform inputs and log-mel targets.

    NOTE(review): this class is name-mangled and cannot run as written.  The
    base class ``_UpperCAmelCase`` is undefined here (the ``super().__init__``
    call matches the imported ``SequenceFeatureExtractor``); several methods
    share the name ``UpperCamelCase__`` so later defs clobber earlier ones;
    ``__call__`` invokes ``self._process_audio``, which no longer exists under
    the mangled names; and ``__init__`` repeats the parameter name ``A_`` (a
    SyntaxError) while its body reads names (``do_normalize``,
    ``num_mel_bins``, ...) that are never bound.  Restore names before use.
    """
    # Keys this extractor emits in its BatchFeature output.
    _UpperCamelCase = ["""input_values""", """attention_mask"""]
    def __init__( self , A_ = 1 , A_ = 1_6000 , A_ = 0.0 , A_ = False , A_ = 80 , A_ = 16 , A_ = 64 , A_ = "hann_window" , A_ = 1.0 , A_ = 80 , A_ = 7600 , A_ = 1e-10 , A_ = 2 , A_ = True , **A_ , ) ->Any:
        """Store STFT/mel parameters and precompute the analysis window and mel filter bank."""
        super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ )
        __lowerCAmelCase : Union[str, Any] = do_normalize
        __lowerCAmelCase : List[Any] = return_attention_mask
        __lowerCAmelCase : Dict = num_mel_bins
        __lowerCAmelCase : int = hop_length
        __lowerCAmelCase : List[Any] = win_length
        __lowerCAmelCase : Tuple = win_function
        __lowerCAmelCase : Union[str, Any] = frame_signal_scale
        __lowerCAmelCase : Union[str, Any] = fmin
        __lowerCAmelCase : Optional[int] = fmax
        __lowerCAmelCase : int = mel_floor
        __lowerCAmelCase : int = reduction_factor
        # Window/hop lengths are given in milliseconds; convert to samples.
        __lowerCAmelCase : int = win_length * sampling_rate // 1000
        __lowerCAmelCase : Dict = hop_length * sampling_rate // 1000
        __lowerCAmelCase : List[str] = optimal_fft_length(self.sample_size )
        __lowerCAmelCase : Tuple = (self.n_fft // 2) + 1
        __lowerCAmelCase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=A_ )
        __lowerCAmelCase : Any = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
        # Both arguments below are deprecated and only kept for BC.
        if frame_signal_scale != 1.0:
            warnings.warn(
                '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , A_ , )
        if reduction_factor != 2.0:
            warnings.warn(
                '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , A_ , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def UpperCamelCase__ ( A_ , A_ , A_ = 0.0 ) ->List[np.ndarray]:
        """Per-utterance zero-mean/unit-variance normalization; padded tail is set to padding_value."""
        if attention_mask is not None:
            __lowerCAmelCase : List[str] = np.array(A_ , np.intaa )
            __lowerCAmelCase : int = []
            for vector, length in zip(A_ , attention_mask.sum(-1 ) ):
                # Statistics are computed over the unpadded prefix only.
                __lowerCAmelCase : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    __lowerCAmelCase : List[str] = padding_value
                normed_input_values.append(A_ )
        else:
            __lowerCAmelCase : str = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def UpperCamelCase__ ( self , A_ , ) ->np.ndarray:
        """Compute a log10-mel spectrogram for one waveform; returns frames along axis 0."""
        __lowerCAmelCase : List[Any] = spectrogram(
            A_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
        return log_mel_spec.T
    def __call__( self , A_ = None , A_ = None , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , A_ = None , **A_ , ) ->BatchFeature:
        """Featurize `audio` (model inputs) and/or `audio_target` (mel-spectrogram labels)."""
        if audio is None and audio_target is None:
            raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        if audio is not None:
            __lowerCAmelCase : Union[str, Any] = self._process_audio(
                A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , )
        else:
            __lowerCAmelCase : Optional[Any] = None
        if audio_target is not None:
            __lowerCAmelCase : Union[str, Any] = self._process_audio(
                A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , )
            # Targets alone are returned as-is; otherwise they are attached to
            # the input batch as labels / decoder_attention_mask.
            if inputs is None:
                return inputs_target
            else:
                __lowerCAmelCase : str = inputs_target['''input_values''']
                __lowerCAmelCase : str = inputs_target.get('''attention_mask''' )
                if decoder_attention_mask is not None:
                    __lowerCAmelCase : Tuple = decoder_attention_mask
        return inputs
    def UpperCamelCase__ ( self , A_ , A_ = False , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , **A_ , ) ->BatchFeature:
        """Normalize batching of raw speech, pad it, and (for targets) extract mel features."""
        __lowerCAmelCase : Dict = isinstance(A_ , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        __lowerCAmelCase : int = is_batched_numpy or (
            isinstance(A_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __lowerCAmelCase : Optional[Any] = [np.asarray(A_ , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(A_ , np.ndarray ):
            __lowerCAmelCase : Tuple = np.asarray(A_ , dtype=np.floataa )
        elif isinstance(A_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            __lowerCAmelCase : Any = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __lowerCAmelCase : List[str] = [speech]
        # needed to make pad() work on spectrogram inputs
        __lowerCAmelCase : Optional[Any] = self.feature_size
        # convert into correct format for padding
        if is_target:
            __lowerCAmelCase : Optional[int] = [self._extract_mel_features(A_ ) for waveform in speech]
            __lowerCAmelCase : Dict = BatchFeature({'''input_values''': features} )
            __lowerCAmelCase : Tuple = self.num_mel_bins
        else:
            __lowerCAmelCase : int = BatchFeature({'''input_values''': speech} )
        __lowerCAmelCase : List[str] = self.pad(
            A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , )
        __lowerCAmelCase : Union[str, Any] = feature_size_hack
        # convert input values to correct format
        __lowerCAmelCase : str = padded_inputs['''input_values''']
        if not isinstance(input_values[0] , np.ndarray ):
            __lowerCAmelCase : str = [np.asarray(A_ , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(A_ , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            __lowerCAmelCase : List[str] = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(A_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            __lowerCAmelCase : List[str] = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        __lowerCAmelCase : Optional[Any] = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            __lowerCAmelCase : Any = [np.asarray(A_ , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            __lowerCAmelCase : Dict = (
                attention_mask
                if self._get_padding_strategies(A_ , max_length=A_ ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            __lowerCAmelCase : Tuple = self.zero_mean_unit_var_norm(
                padded_inputs['''input_values'''] , attention_mask=A_ , padding_value=self.padding_value )
        if return_tensors is not None:
            __lowerCAmelCase : Dict = padded_inputs.convert_to_tensors(A_ )
        return padded_inputs
    def UpperCamelCase__ ( self ) ->Dict[str, Any]:
        """Serialize the config, dropping attributes that __init__ derives from the others."""
        __lowerCAmelCase : str = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        __lowerCAmelCase : List[str] = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
        for name in names:
            if name in output:
                del output[name]
        return output
| 492 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard transformers lazy-import scaffolding for the LUKE model: the
# torch-dependent modeling module is only registered when torch is available,
# and at runtime the module object is replaced by a _LazyModule proxy.
# (Restored: the mangled original never bound `_import_structure`, which the
# _LazyModule call requires, and lost the `sys.modules[__name__] =` target.)
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # Defer the heavy imports until an attribute is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 492 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _A ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : np.ndarray ):
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
def _A ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : np.ndarray ):
"""simple docstring"""
if dataset.ndim != value_array.ndim:
lowerCAmelCase__ = (
"Wrong input data's dimensions... "
F'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
)
raise ValueError(lowerCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
lowerCAmelCase__ = (
"Wrong input data's shape... "
F'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
)
raise ValueError(lowerCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
lowerCAmelCase__ = (
"Input data have different datatype... "
F'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
)
raise TypeError(lowerCAmelCase_ )
lowerCAmelCase__ = []
for value in value_array:
lowerCAmelCase__ = euclidean(lowerCAmelCase_ , dataset[0] )
lowerCAmelCase__ = dataset[0].tolist()
for dataset_value in dataset[1:]:
lowerCAmelCase__ = euclidean(lowerCAmelCase_ , lowerCAmelCase_ )
if dist > temp_dist:
lowerCAmelCase__ = temp_dist
lowerCAmelCase__ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _A ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : np.ndarray ):
"""simple docstring"""
return np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) / (norm(lowerCAmelCase_ ) * norm(lowerCAmelCase_ ))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 718 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class __lowerCamelCase ( UpperCamelCase__ ):
    """Token-classification task reader for CoNLL-style NER files.

    NOTE(review): name-mangled.  The base class ``UpperCamelCase__`` is not
    defined in this file (the imports suggest ``TokenClassificationTask``);
    ``__init__`` assigns the never-bound name ``label_idx`` instead of
    ``self.label_idx = <param>``; and the first ``a`` method repeats the
    parameter name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError).
    """
    def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=-1 ) -> str:
        # in NER datasets, the last column is usually reserved for NER label
        lowerCAmelCase__ = label_idx
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
        """Read ``<mode>.txt`` from a data dir and build one InputExample per sentence."""
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = mode.value
        lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' )
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = []
        with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
            lowerCAmelCase__ = []
            lowerCAmelCase__ = []
            for line in f:
                # Blank lines and -DOCSTART- markers delimit sentences.
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
                        guid_index += 1
                        lowerCAmelCase__ = []
                        lowerCAmelCase__ = []
                else:
                    lowerCAmelCase__ = line.split(" " )
                    words.append(splits[0] )
                    if len(SCREAMING_SNAKE_CASE__ ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
        return examples
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> Dict:
        """Write predictions to ``writer``, aligned line-by-line with the test input."""
        lowerCAmelCase__ = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(SCREAMING_SNAKE_CASE__ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                lowerCAmelCase__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(SCREAMING_SNAKE_CASE__ )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
    def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
        """Load labels from ``path`` (ensuring "O" is present), or fall back to CoNLL-2003 defaults."""
        if path:
            with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
                lowerCAmelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCAmelCase__ = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCamelCase ( UpperCamelCase__ ):
    """Token-classification task for syntactic chunking (CoNLL-2000 style files)."""
    def __init__( self : Dict ) -> List[str]:
        # The chunk tag lives in the second-to-last column of CONLL2003-style rows.
        super().__init__(label_idx=-2 )
    def a ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
        """Return the label set: one label per line from ``path`` if given, else the default BIO chunk tags."""
        if not SCREAMING_SNAKE_CASE__:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
        with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
            labels = f.read().splitlines()
        # Guarantee the outside tag is part of the label set.
        return labels if "O" in labels else ["O"] + labels
class __lowerCamelCase ( UpperCamelCase__ ):
    """Token-classification task reader for Universal Dependencies POS tagging (CoNLL-U via ``conllu``).

    NOTE(review): name-mangled.  The base class ``UpperCamelCase__`` is not
    defined in this file, locals are collapsed onto ``lowerCAmelCase__``, and
    the first ``a`` method repeats the parameter name
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError).
    """
    def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
        """Read ``<mode>.txt`` in CoNLL-U format and build one InputExample per sentence."""
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = mode.value
        lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' )
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = []
        with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
            for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
                lowerCAmelCase__ = []
                lowerCAmelCase__ = []
                for token in sentence:
                    # Surface form + universal POS tag per token.
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
                    guid_index += 1
        return examples
    def a ( self : int , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> int:
        """Write each sentence with gold and predicted tags interleaved per token."""
        lowerCAmelCase__ = 0
        for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ = preds_list[example_id]
            lowerCAmelCase__ = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(SCREAMING_SNAKE_CASE__ )
            example_id += 1
    def a ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
        """Load labels from ``path`` if given, else return the 17 universal POS tags."""
        if path:
            with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 125 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Restored bindings: the mangled original assigned both statements to the same
# name, shadowing the logger with the archive map.
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __magic_name__ ( lowercase_ ):
    """Configuration class for the M-CTC-T speech-recognition model.

    Restored from a name-mangled original: the parameter list repeated one
    identifier (a SyntaxError) and every ``self.x = ...`` target was
    collapsed.  The parameter names below match the right-hand-side names the
    mangled body already referenced, in the original order.
    """

    # Key under which PretrainedConfig registers this config type.
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8_065,
        hidden_size=1_536,
        num_hidden_layers=36,
        intermediate_size=6_144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 628 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored bindings: the mangled original assigned both statements to the same
# name, shadowing the logger with the archive map.
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase__ ( lowercase_ ):
    """Configuration class for the Donut Swin vision encoder.

    Restored from a name-mangled original: the parameter list repeated one
    identifier (a SyntaxError) and every ``self.x = ...`` target was
    collapsed.  Parameter names match the right-hand-side names the mangled
    body already referenced, in the original order.
    """

    model_type = "donut-swin"

    # Map standard config attribute names onto Swin's own names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 379 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Torch availability probe and cache/asset path constants.  Names restored:
# the mangled original bound everything to `a_`, while later code in this file
# references `default_cache_path`, `OBJECTS`, `ATTRIBUTES`, and the
# PYTORCH_*/TRANSFORMERS_CACHE names.
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    # Fall back to the conventional XDG cache location.
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory containing this file; config and label vocabularies live beside it.
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def a__ ( objs=OBJECTS, attrs=ATTRIBUTES ):
    """Load the Visual Genome class and attribute vocabularies.

    Each file holds one label per line; the label is the text before the first
    comma, lowercased and stripped.  Returns ``(vg_classes, vg_attrs)``.

    The mangled original repeated one parameter name (a SyntaxError) and
    collapsed both list bindings; names are restored here.
    """
    def _read(path):
        # One vocabulary file -> list of normalized labels.
        with open(path) as f:
            return [line.split(",")[0].lower().strip() for line in f]

    return _read(objs), _read(attrs)
def a__ ( _UpperCamelCase ):
    """Load a pickled detectron-style checkpoint and convert numpy arrays to torch tensors.

    The pickle file must contain a dict with a ``"model"`` key mapping
    parameter names to numpy arrays (or torch tensors).  Returns an
    ``OrderedDict`` of torch tensors in the original key order.

    Restored from a mangled original that collapsed all local bindings; also
    fixes ``isinstance(v, torch.tensor)`` — ``torch.tensor`` is a factory
    function, the type is ``torch.Tensor``.
    """
    result = OrderedDict()
    with open(_UpperCamelCase, "rb") as f:
        ckp = pkl.load(f)["model"]
    # list() already snapshots the keys, so mutating ckp below is safe.
    for k in list(ckp.keys()):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        result[k] = v
    return result
class __lowerCAmelCase :
    """Nested, YAML-backed configuration container.

    NOTE(review): identifiers in this class are machine-mangled — every local
    is assigned as ``__lowerCamelCase`` but read under another name
    (``dictionary``, ``val``, ``levels``, ``pointer``, ...), several methods
    share the name ``lowerCamelCase`` (later defs shadow earlier ones), and
    ``__init__``/``__setattr__`` declare duplicate parameter names (a
    SyntaxError). Comments below describe apparent intent; TODO restore the
    original identifiers.
    """

    # Presumably the default/root config mapping — TODO confirm.
    lowerCAmelCase__ = {}
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "root" , __UpperCAmelCase=0 ):
        """Recursively wrap a (possibly nested) dict; one attribute per key."""
        __lowerCamelCase = name
        __lowerCamelCase = level
        __lowerCamelCase = {}
        for k, v in dictionary.items():
            if v is None:
                # None values are rejected outright.
                raise ValueError()
            __lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
            __lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                # Nested dicts become child Config nodes one level deeper.
                __lowerCamelCase = Config(__UpperCAmelCase , name=__UpperCAmelCase , level=level + 1 )
            __lowerCamelCase = v
            setattr(self , __UpperCAmelCase , __UpperCAmelCase )
        __lowerCamelCase = d
    def __repr__( self ):
        """List the top-level keys of this node."""
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self , __UpperCAmelCase , __UpperCAmelCase ):
        """Set an attribute, supporting dotted paths into nested nodes."""
        __lowerCamelCase = val
        __lowerCamelCase = val
        __lowerCamelCase = key.split('''.''' )
        __lowerCamelCase = len(__UpperCAmelCase ) - 1
        __lowerCamelCase = self._pointer
        if len(__UpperCAmelCase ) > 1:
            # Walk the dotted path, delegating to child Config nodes.
            for i, l in enumerate(__UpperCAmelCase ):
                if hasattr(self , __UpperCAmelCase ) and isinstance(getattr(self , __UpperCAmelCase ) , __UpperCAmelCase ):
                    setattr(getattr(self , __UpperCAmelCase ) , '''.'''.join(levels[i:] ) , __UpperCAmelCase )
                if l == last_level:
                    __lowerCamelCase = val
                else:
                    __lowerCamelCase = pointer[l]
    def lowerCamelCase ( self ):
        """Return the underlying mapping for this node."""
        return self._pointer
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
        """Serialize the given data to ``file_name`` as YAML."""
        with open(F"""{file_name}""" , '''w''' ) as stream:
            dump(__UpperCAmelCase , __UpperCAmelCase )
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
        """Serialize the given data to ``file_name`` as JSON."""
        with open(F"""{file_name}""" , '''w''' ) as stream:
            json.dump(__UpperCAmelCase , __UpperCAmelCase )
    @staticmethod
    def lowerCamelCase ( __UpperCAmelCase ):
        """Parse a YAML file and return its payload."""
        with open(__UpperCAmelCase ) as stream:
            __lowerCamelCase = load(__UpperCAmelCase , Loader=__UpperCAmelCase )
        return data
    def __str__( self ):
        """Pretty-print the tree, indenting one space per nesting level."""
        __lowerCamelCase = ''' '''
        if self._name != "root":
            __lowerCamelCase = F"""{t * (self._level-1)}{self._name}:\n"""
        else:
            __lowerCamelCase = ''''''
        __lowerCamelCase = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                r += F"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += F"""{t * (self._level)}{k}: {v} ({type(__UpperCAmelCase ).__name__})\n"""
        __lowerCamelCase = level
        return r[:-1]
    @classmethod
    def lowerCamelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ):
        """Build a Config from a pretrained model name/path."""
        __lowerCamelCase ,__lowerCamelCase = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
        return cls(__UpperCAmelCase )
    @classmethod
    def lowerCamelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ):
        """Resolve (dir / file / remote URL) and load the YAML config dict."""
        __lowerCamelCase = kwargs.pop('''cache_dir''' , __UpperCAmelCase )
        __lowerCamelCase = kwargs.pop('''force_download''' , __UpperCAmelCase )
        __lowerCamelCase = kwargs.pop('''resume_download''' , __UpperCAmelCase )
        __lowerCamelCase = kwargs.pop('''proxies''' , __UpperCAmelCase )
        __lowerCamelCase = kwargs.pop('''local_files_only''' , __UpperCAmelCase )
        if os.path.isdir(__UpperCAmelCase ):
            __lowerCamelCase = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
        elif os.path.isfile(__UpperCAmelCase ) or is_remote_url(__UpperCAmelCase ):
            __lowerCamelCase = pretrained_model_name_or_path
        else:
            __lowerCamelCase = hf_bucket_url(__UpperCAmelCase , filename=__UpperCAmelCase , use_cdn=__UpperCAmelCase )
        try:
            # Load from URL or cache if already cached
            __lowerCamelCase = cached_path(
                __UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , local_files_only=__UpperCAmelCase , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __lowerCamelCase = Config.load_yaml(__UpperCAmelCase )
        except EnvironmentError:
            __lowerCamelCase = '''Can\'t load config for'''
            raise EnvironmentError(__UpperCAmelCase )
        if resolved_config_file == config_file:
            print('''loading configuration file from path''' )
        else:
            print('''loading configuration file cache''' )
        return Config.load_yaml(__UpperCAmelCase ), kwargs
def a__ ( _UpperCamelCase : Dict ):
    """Debug helper: compare an in-memory tensor against one saved in 'dump.pt'.

    NOTE(review): names are mangled — locals are assigned as
    ``__lowerCamelCase`` and read as ``in_tensor``/``out_tensor``/``na``.
    The trailing ``raise`` fires only when the tensors DO match (mirrors the
    upstream debug utility). TODO restore original identifiers.
    """
    __lowerCamelCase = torch.load('''dump.pt''' ,map_location=in_tensor.device )
    __lowerCamelCase = in_tensor.numpy()
    __lowerCamelCase = out_tensor.numpy()[0]
    print(na.shape ,na[0, 0, :5] )
    print(na.shape ,na[0, 0, :5] )
    # Element-wise tolerance check; the message reports the mismatch ratio.
    assert np.allclose(_snake_case ,_snake_case ,rtol=0.01 ,atol=0.1 ), (
        F"""{sum([1 for x in np.isclose(_snake_case ,_snake_case ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''' )
# Hugging face functions below
def a__(_UpperCamelCase):
    """Return True when the argument is an http(s) URL rather than a local path.

    Args:
        _UpperCamelCase: string to classify (URL or filesystem path).

    Returns:
        bool: True for ``http://`` / ``https://`` URLs, False otherwise.
    """
    # Fix: the body referenced `_snake_case`, which was never defined.
    parsed = urlparse(_UpperCamelCase)
    return parsed.scheme in ("http", "https")
def a__(model_id, filename, use_cdn=True):
    """Build the download URL for a file of a hub-hosted model.

    Args:
        model_id: model identifier; ids without ``/`` use the legacy layout.
        filename: name of the file to fetch (was ignored by the broken
            original, which baked a literal ``(unknown)`` into the URL).
        use_cdn: choose the CDN endpoint over the raw S3 bucket.

    Returns:
        str: fully-qualified URL for ``filename`` under ``model_id``.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def a__(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream ``url`` into the open binary file object ``temp_file``.

    Args:
        url: remote file to download.
        temp_file: writable binary file object receiving the payload.
        proxies: optional requests-style proxy mapping.
        resume_size: byte offset to resume from (sends a Range header).
        user_agent: extra UA info — a dict of key/value pairs or a string.

    Returns:
        None. Returns early without writing if the server answers 416
        (requested range not satisfiable, i.e. the file is already complete).
    """
    # Fix: the original declared five parameters with the same name (a
    # SyntaxError) and never attached the Range value to the headers dict.
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading"
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Union[str, Any]=False ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : str=10 ,_UpperCamelCase : List[Any]=False ,_UpperCamelCase : str=None ,_UpperCamelCase : int=False ,):
    """Resolve ``url`` through the local download cache, downloading if needed.

    NOTE(review): identifiers are machine-mangled — all eight parameters share
    the name ``_UpperCamelCase`` (a SyntaxError) and locals are assigned as
    ``__lowerCamelCase`` but read as ``cache_dir``/``etag``/``filename``/
    ``cache_path``/... Apparent signature: (url, cache_dir=None,
    force_download=False, proxies=None, etag_timeout=10, resume_download=False,
    user_agent=None, local_files_only=False). TODO restore original names.
    """
    if cache_dir is None:
        __lowerCamelCase = TRANSFORMERS_CACHE
    if isinstance(_snake_case ,_snake_case ):
        __lowerCamelCase = str(_snake_case )
    os.makedirs(_snake_case ,exist_ok=_snake_case )
    __lowerCamelCase = None
    if not local_files_only:
        # Probe the remote ETag; failures simply leave etag as None.
        try:
            __lowerCamelCase = requests.head(_snake_case ,allow_redirects=_snake_case ,proxies=_snake_case ,timeout=_snake_case )
            if response.status_code == 2_00:
                __lowerCamelCase = response.headers.get('''ETag''' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    __lowerCamelCase = url_to_filename(_snake_case ,_snake_case )
    # get cache path to put the file
    __lowerCamelCase = os.path.join(_snake_case ,_snake_case )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(_snake_case ):
            return cache_path
        else:
            __lowerCamelCase = [
                file
                for file in fnmatch.filter(os.listdir(_snake_case ) ,filename + '''.*''' )
                if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
            ]
            if len(_snake_case ) > 0:
                return os.path.join(_snake_case ,matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        '''Cannot find the requested files in the cached path and outgoing traffic has been'''
                        ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
                        ''' to False.''' )
                return None
    # From now on, etag is not None.
    if os.path.exists(_snake_case ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    __lowerCamelCase = cache_path + '''.lock'''
    with FileLock(_snake_case ):
        # If the download just completed while the lock was activated.
        if os.path.exists(_snake_case ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            __lowerCamelCase = cache_path + '''.incomplete'''
            @contextmanager
            def _resumable_file_manager():
                # Append mode so a partially-downloaded file is continued.
                with open(_snake_case ,'''a+b''' ) as f:
                    yield f
            __lowerCamelCase = _resumable_file_manager
            if os.path.exists(_snake_case ):
                __lowerCamelCase = os.stat(_snake_case ).st_size
            else:
                __lowerCamelCase = 0
        else:
            __lowerCamelCase = partial(tempfile.NamedTemporaryFile ,dir=_snake_case ,delete=_snake_case )
            __lowerCamelCase = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' ,_snake_case ,temp_file.name ,)
            http_get(
                _snake_case ,_snake_case ,proxies=_snake_case ,resume_size=_snake_case ,user_agent=_snake_case ,)
        os.replace(temp_file.name ,_snake_case )
        # Record URL/ETag metadata alongside the cached payload.
        __lowerCamelCase = {'''url''': url, '''etag''': etag}
        __lowerCamelCase = cache_path + '''.json'''
        with open(_snake_case ,'''w''' ) as meta_file:
            json.dump(_snake_case ,_snake_case )
    return cache_path
def a__(url, etag=None):
    """Derive a deterministic cache filename for a URL (and optional ETag).

    The name is ``sha256(url)`` hex, with ``.sha256(etag)`` appended when an
    ETag is given, and the original ``.h5`` suffix preserved so downstream
    loaders can recognize HDF5 payloads.

    Args:
        url: source URL of the cached file.
        etag: optional HTTP ETag identifying the remote revision.

    Returns:
        str: the cache filename.
    """
    from hashlib import sha256  # local import keeps this fix self-contained

    url_bytes = url.encode("utf-8")
    filename = sha256(url_bytes).hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        filename += "." + sha256(etag_bytes).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : int=None ,_UpperCamelCase : Optional[Any]=False ,_UpperCamelCase : int=None ,_UpperCamelCase : Any=False ,_UpperCamelCase : Any=None ,_UpperCamelCase : List[str]=False ,_UpperCamelCase : List[Any]=False ,_UpperCamelCase : Any=False ,):
    """Resolve a URL or local path to a usable local file, optionally extracting archives.

    NOTE(review): identifiers are machine-mangled — all nine parameters share
    one name (a SyntaxError) and locals are read as ``cache_dir``/
    ``output_path``/``output_path_extracted``/... Apparent signature:
    (url_or_filename, cache_dir=None, force_download=False, proxies=None,
    resume_download=False, user_agent=None, extract_compressed_file=False,
    force_extract=False, local_files_only=False). TODO restore originals.
    """
    if cache_dir is None:
        __lowerCamelCase = TRANSFORMERS_CACHE
    if isinstance(_snake_case ,_snake_case ):
        __lowerCamelCase = str(_snake_case )
    if isinstance(_snake_case ,_snake_case ):
        __lowerCamelCase = str(_snake_case )
    if is_remote_url(_snake_case ):
        # URL, so get it from the cache (downloading if necessary)
        __lowerCamelCase = get_from_cache(
            _snake_case ,cache_dir=_snake_case ,force_download=_snake_case ,proxies=_snake_case ,resume_download=_snake_case ,user_agent=_snake_case ,local_files_only=_snake_case ,)
    elif os.path.exists(_snake_case ):
        # File, and it exists.
        __lowerCamelCase = url_or_filename
    elif urlparse(_snake_case ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('''file {} not found'''.format(_snake_case ) )
    else:
        # Something unknown
        raise ValueError('''unable to parse {} as a URL or as a local path'''.format(_snake_case ) )
    if extract_compressed_file:
        if not is_zipfile(_snake_case ) and not tarfile.is_tarfile(_snake_case ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        __lowerCamelCase ,__lowerCamelCase = os.path.split(_snake_case )
        __lowerCamelCase = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
        __lowerCamelCase = os.path.join(_snake_case ,_snake_case )
        if os.path.isdir(_snake_case ) and os.listdir(_snake_case ) and not force_extract:
            # Already extracted and not forced: reuse it.
            return output_path_extracted
        # Prevent parallel extractions
        __lowerCamelCase = output_path + '''.lock'''
        with FileLock(_snake_case ):
            shutil.rmtree(_snake_case ,ignore_errors=_snake_case )
            os.makedirs(_snake_case )
            if is_zipfile(_snake_case ):
                with ZipFile(_snake_case ,'''r''' ) as zip_file:
                    zip_file.extractall(_snake_case )
                    zip_file.close()
            elif tarfile.is_tarfile(_snake_case ):
                __lowerCamelCase = tarfile.open(_snake_case )
                tar_file.extractall(_snake_case )
                tar_file.close()
            else:
                raise EnvironmentError('''Archive format of {} could not be identified'''.format(_snake_case ) )
        return output_path_extracted
    return output_path
def a__(query, delim=","):
    """Fetch data from a local file or a URL and best-effort parse it.

    Args:
        query: filesystem path or URL.
        delim: kept for backward compatibility with the original signature;
            the current logic does not use it.

    Returns:
        The parsed payload: a Python object when the content parses, otherwise
        the decoded text split into lines.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        # SECURITY: eval() executes arbitrary code — only use with trusted
        # files (behavior kept for parity with the original).
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # Fix: .json() lives on the response object, not the requests module.
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def a__(url):
    """Fetch an image over HTTP and return it as a NumPy array.

    Args:
        url: address of the image to download.

    Returns:
        ``np.ndarray`` decoded by PIL from the response body.
    """
    # Fix: the response was assigned to a mangled name and then read as
    # `response`.
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def a__(url):
    """Download (if absent) a pickled Detectron checkpoint and torch-ify it.

    The file is fetched into the current working directory unless a file with
    the URL's basename already exists there. Every ``running_var`` entry also
    gains a zeroed ``num_batches_tracked`` companion tensor, which BatchNorm
    state dicts expect.

    Args:
        url: remote (or local-basename) location of the ``.pkl`` checkpoint.

    Returns:
        dict mapping parameter name -> ``torch.Tensor``.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def a__():
    """Print the absolute path of the demo notebook shipped next to this module.

    Returns:
        None — output goes to stdout only.
    """
    # Fix: the original interpolated the undefined name `_snake_case`;
    # the path is anchored at this module's own file location.
    print(f"""{os.path.abspath(os.path.join(__file__, os.pardir))}/demo.ipynb""")
def a__(im, input_format="RGB"):
    """Load an image from disk or URL and return it as an array.

    Args:
        im: filesystem path or URL of the image.
        input_format: "RGB" flips the channel order after the BGR->RGB
            conversion (i.e. yields BGR data for RGB-expecting models, as in
            the original pipeline); any other value keeps RGB.

    Returns:
        ``np.ndarray`` of shape (H, W, 3).
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
    assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def a__(images, batch=1):
    """Yield consecutive slices of ``images`` of length ``batch``.

    The final slice may be shorter when ``len(images)`` is not a multiple of
    ``batch``.

    Args:
        images: sequence to split.
        batch: slice size (default 1).

    Returns:
        Generator of slices of ``images``.
    """
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 708 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
    """Tokenization tests for RoFormer (slow and fast tokenizers).

    NOTE(review): identifiers are machine-mangled — the base class
    ``lowerCAmelCase__`` is undefined (presumably ``TokenizerTesterMixin``,
    imported above), every class attribute rebinds ``lowerCAmelCase__``, all
    test methods share the name ``lowerCamelCase`` (later defs shadow earlier
    ones), and locals are assigned as ``__lowerCamelCase`` but read under
    other names. TODO restore the original identifiers.
    """
    lowerCAmelCase__ = RoFormerTokenizer
    lowerCAmelCase__ = RoFormerTokenizerFast
    lowerCAmelCase__ = True
    lowerCAmelCase__ = True
    def lowerCamelCase ( self ):
        """Delegate to the shared tokenizer-test setup."""
        super().setUp()
    def lowerCamelCase ( self , **__UpperCAmelCase ):
        """Instantiate the slow tokenizer from the reference checkpoint."""
        return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
    def lowerCamelCase ( self , **__UpperCAmelCase ):
        """Instantiate the fast (Rust) tokenizer from the reference checkpoint."""
        return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
    def lowerCamelCase ( self ):
        """Return a (raw, expected-tokenization) Chinese text pair."""
        __lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
        __lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
        return input_text, output_text
    def lowerCamelCase ( self ):
        """Slow tokenizer: tokens and converted ids match the expected values."""
        __lowerCamelCase = self.get_tokenizer()
        __lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
        __lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , output_text.split() )
        __lowerCamelCase = tokens + [tokenizer.unk_token]
        __lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
    def lowerCamelCase ( self ):
        """Fast tokenizer: tokens and converted ids match the expected values."""
        __lowerCamelCase = self.get_rust_tokenizer()
        __lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
        __lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , output_text.split() )
        __lowerCamelCase = tokens + [tokenizer.unk_token]
        __lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
    def lowerCamelCase ( self ):
        """No-op override (body is ``pass``)."""
        pass
    def lowerCamelCase ( self ):
        """No-op override (body is ``pass``)."""
        pass
    def lowerCamelCase ( self ):
        """No-op override (body is ``pass``)."""
        pass
| 622 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__lowercase : List[str] = logging.get_logger(__name__)
def lowercase() -> bool:
    """Return True when SageMaker model parallelism is configured and usable.

    Checks, in order: the ``SM_HP_MP_PARAMETERS`` env var parses as JSON and
    declares ``partitions``; ``SM_FRAMEWORK_PARAMS`` parses as JSON and
    enables ``sagemaker_mpi_enabled``; and the ``smdistributed`` package is
    importable. Any failed step yields False.
    """
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        # Fix: the original passed the undefined name `__A` as the default.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
# Initialize SageMaker model parallelism once at import time when configured.
# NOTE(review): `is_sagemaker_model_parallel_available` appears to be the
# function defined above under the mangled name `lowercase` — TODO confirm.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp
    smp.init()
@dataclass
class _A ( snake_case ):
    """SageMaker-specific TrainingArguments (deprecated; use TrainingArguments).

    NOTE(review): identifiers are machine-mangled — the base ``snake_case``,
    the field name ``__lowerCamelCase``, the repeated method name
    ``snake_case_`` (later defs shadow earlier ones), the warn category
    ``SCREAMING_SNAKE_CASE_`` and the locals assigned as ``snake_case`` but
    read as ``device``/``local_rank``/... are all placeholders. TODO restore
    the originals (TrainingArguments base, ``mp_parameters`` field,
    ``__post_init__`` / ``_setup_devices`` / ``world_size`` / ...).
    """
    __lowerCamelCase : str = field(
        default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
    def snake_case_ ( self ):
        """Run the parent post-init, then emit a deprecation warning."""
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" ,SCREAMING_SNAKE_CASE_ ,)
    @cached_property
    def snake_case_ ( self ):
        """Pick the torch device: CPU, SMP rank, SM-DP rank, default CUDA or DDP."""
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            snake_case : Tuple = torch.device("""cpu""" )
            snake_case : Optional[Any] = 0
        elif is_sagemaker_model_parallel_available():
            # Model-parallel: one CUDA device per SMP local rank.
            snake_case : Tuple = smp.local_rank()
            snake_case : List[str] = torch.device("""cuda""" ,SCREAMING_SNAKE_CASE_ )
            snake_case : List[Any] = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
            torch.distributed.init_process_group(backend="""smddp""" ,timeout=self.ddp_timeout_delta )
            snake_case : Union[str, Any] = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            snake_case : List[str] = torch.device("""cuda""" ,self.local_rank )
            snake_case : Tuple = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            snake_case : Any = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            snake_case : Optional[int] = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" ,timeout=self.ddp_timeout_delta )
            snake_case : Tuple = torch.device("""cuda""" ,self.local_rank )
            snake_case : Optional[int] = 1
        if device.type == "cuda":
            torch.cuda.set_device(SCREAMING_SNAKE_CASE_ )
        return device
    @property
    def snake_case_ ( self ):
        """World size: SMP data-parallel size when model parallel, else parent's."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def snake_case_ ( self ):
        """Whether batches should be placed on device (False under model parallel)."""
        return not is_sagemaker_model_parallel_available()
    @property
    def snake_case_ ( self ):
        """Always False for this configuration."""
        return False
| 36 |
from __future__ import annotations
def A(limit: int) -> list[int]:
    """Return every prime strictly below ``limit`` (odds-only Eratosthenes sieve).

    Args:
        limit: exclusive upper bound; values below 3 yield an empty list.

    Returns:
        Ascending list of primes smaller than ``limit``.
    """
    # Guard small limits: the original indexed position 2 unconditionally and
    # crashed for limit < 3.
    if limit < 3:
        return []
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    for i in range(3, int(limit**0.5 + 1), 2):
        # Strike out every multiple of i (even ones included; they are never
        # read because the collection loop below visits odd indices only).
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index += i
    primes = [2]  # 2 is the only even prime
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def A(ceiling: int = 1000000) -> int:
    """Project Euler 50: largest prime below ``ceiling`` expressible as the sum
    of the most consecutive primes.

    Args:
        ceiling: exclusive upper bound for both the sieve and the sums.

    Returns:
        The prime reachable with the longest run of consecutive primes.
    """
    # NOTE(review): `prime_sieve` is not defined under that name in this file
    # (the sieve above is itself mangled to `A`) — TODO restore the name.
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    # O(1) membership instead of the original list scan inside the hot loop.
    prime_set = set(primes)
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''') | 45 | 0 |
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase_ = os.path.join(git_repo_path, '''src''', '''transformers''')
lowercase_ = '\n{0} = None\n'
lowercase_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
lowercase_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class __a ( unittest.TestCase ):
    """Unit tests for the `check_dummies` repo-maintenance utility.

    NOTE(review): identifiers are machine-mangled — all test methods share the
    name ``UpperCamelCase`` (later defs shadow earlier ones) and locals are
    assigned as ``__lowerCAmelCase`` but read as ``UpperCamelCase__`` /
    ``objects`` / ``dummy_files``. TODO restore the original identifiers.
    """
    def UpperCamelCase ( self : str)-> Optional[Any]:
        """find_backend maps guard lines to their backend identifier."""
        __lowerCAmelCase =find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""")
        self.assertIsNone(UpperCamelCase__)
        __lowerCAmelCase =find_backend(""" if not is_tokenizers_available():""")
        self.assertEqual(UpperCamelCase__ , """tokenizers""")
        __lowerCAmelCase =find_backend(""" if not is_tensorflow_text_available():""")
        self.assertEqual(UpperCamelCase__ , """tensorflow_text""")
        __lowerCAmelCase =find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""")
        self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers""")
        __lowerCAmelCase =find_backend(
            """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""")
        self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tensorflow_text""")
        __lowerCAmelCase =find_backend(
            """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""")
        self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers_and_vision""")
    def UpperCamelCase ( self : Union[str, Any])-> Tuple:
        """read_init yields per-backend object listings with expected entries."""
        __lowerCAmelCase =read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""" , UpperCamelCase__)
        self.assertIn("""tensorflow_text""" , UpperCamelCase__)
        self.assertIn("""sentencepiece_and_tokenizers""" , UpperCamelCase__)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""BertModel""" , objects["""torch"""])
        self.assertIn("""TFBertModel""" , objects["""tf"""])
        self.assertIn("""FlaxBertModel""" , objects["""flax"""])
        self.assertIn("""BertModel""" , objects["""torch"""])
        self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""])
        self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""])
    def UpperCamelCase ( self : Optional[Any])-> Optional[Any]:
        """create_dummy_object renders constants, functions and classes."""
        __lowerCAmelCase =create_dummy_object("""CONSTANT""" , """\'torch\'""")
        self.assertEqual(UpperCamelCase__ , """\nCONSTANT = None\n""")
        __lowerCAmelCase =create_dummy_object("""function""" , """\'torch\'""")
        self.assertEqual(
            UpperCamelCase__ , """\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n""")
        __lowerCAmelCase ='''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        __lowerCAmelCase =create_dummy_object("""FakeClass""" , """\'torch\'""")
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
    def UpperCamelCase ( self : List[Any])-> Union[str, Any]:
        """create_dummy_files emits a complete autogenerated dummy module."""
        __lowerCAmelCase ='''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        __lowerCAmelCase =create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]})
        self.assertEqual(dummy_files["""torch"""] , UpperCamelCase__)
| 712 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase_ = logging.get_logger(__name__)
class __a(PoolFormerImageProcessor):
    """Deprecated alias for :class:`PoolFormerImageProcessor`.

    Kept only for backward compatibility; instantiating it warns and then
    behaves exactly like the replacement class.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Fix: the base was the undefined name SCREAMING_SNAKE_CASE (the
        # import above provides PoolFormerImageProcessor), the varargs shared
        # one mangled name (a SyntaxError), and warnings.warn was given a
        # mangled placeholder instead of a warning category.
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 456 | 0 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase_:
    """Undirected weighted graph with Boruvka's minimum-spanning-tree algorithm.

    Fix: in the mangled original every method carried the same placeholder
    name while the bodies called ``self.find_component`` / ``self.union`` /
    ``self.set_component`` — the class could not run. The methods are restored
    under the names the bodies already use.
    """

    def __init__(self, num_of_nodes: int) -> None:
        # Nodes are labelled 0 .. num_of_nodes - 1.
        self.m_num_of_nodes = num_of_nodes
        # Edge list of [u, v, weight] triples.
        self.m_edges: list[list[int]] = []
        # Union-find map: node -> representative of its component.
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Register an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the representative (root) of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Flatten the component map so every node points at its root."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of u_node and v_node (smaller into larger)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute the MST, printing each chosen edge and the total weight."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # Initially every node is its own component of size 1.
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Pass 1: cheapest outgoing edge of every component.
            for u, v, w in self.m_edges:
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Pass 2: add each selected edge, re-checking because earlier
            # merges in this round may have already joined its endpoints.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def _lowerCamelCase ( ) -> None:
    """No-op entry point.

    NOTE(review): the body is empty apart from this docstring — the mangling
    appears to have dropped the original implementation; TODO confirm.
    """
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 79 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _snake_case ( unittest.TestCase):
def A__ ( self : List[Any] ):
lowercase__ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowercase ) )
def A__ ( self : Any ):
lowercase__ = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowercase ) )
def A__ ( self : List[Any] ):
lowercase__ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowercase ) )
def A__ ( self : int ):
lowercase__ = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self : Optional[int] ):
# pass variant but use the non-variant filenames
lowercase__ = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
lowercase__ = "fp16"
self.assertTrue(is_safetensors_compatible(__lowercase, variant=__lowercase ) )
def A__ ( self ):
    """With variant='fp16', a missing fp16 unet safetensors file means incompatible."""
    filenames = [
        "safety_checker/pytorch_model.fp16.bin",
        "safety_checker/model.fp16.safetensors",
        "vae/diffusion_pytorch_model.fp16.bin",
        "vae/diffusion_pytorch_model.fp16.safetensors",
        "text_encoder/pytorch_model.fp16.bin",
        "text_encoder/model.fp16.safetensors",
        "unet/diffusion_pytorch_model.fp16.bin",
        # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
    ]
    # Fix: distinct locals instead of one clobbered `lowercase__` plus the
    # undefined `__lowercase` in the assertion.
    variant = "fp16"
    self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
def A__ ( self ):
    """With variant='fp16', a text-encoder-only fp16 repo with safetensors is compatible."""
    filenames = [
        "text_encoder/pytorch_model.fp16.bin",
        "text_encoder/model.fp16.safetensors",
    ]
    # Fix: distinct locals instead of one clobbered `lowercase__` plus the
    # undefined `__lowercase` in the assertion.
    variant = "fp16"
    self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
def A__ ( self ):
    # pass variant but use the non-variant filenames
    filenames = [
        "text_encoder/pytorch_model.bin",
        "text_encoder/model.safetensors",
    ]
    # Fix: distinct locals instead of one clobbered `lowercase__` plus the
    # undefined `__lowercase` in the assertion.
    variant = "fp16"
    self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
def A__ ( self ):
    """With variant='fp16', a missing fp16 text encoder safetensors file means incompatible."""
    filenames = [
        "safety_checker/pytorch_model.fp16.bin",
        "safety_checker/model.fp16.safetensors",
        "vae/diffusion_pytorch_model.fp16.bin",
        "vae/diffusion_pytorch_model.fp16.safetensors",
        "text_encoder/pytorch_model.fp16.bin",
        # 'text_encoder/model.fp16.safetensors',
        "unet/diffusion_pytorch_model.fp16.bin",
        "unet/diffusion_pytorch_model.fp16.safetensors",
    ]
    # Fix: distinct locals instead of one clobbered `lowercase__` plus the
    # undefined `__lowercase` in the assertion.
    variant = "fp16"
    self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 413 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: submodule name -> list of public names it provides.
# Optional backends (tokenizers / torch / tf) are only registered when their
# dependency is importable.
#
# Fixes: every structure was previously rebound to the same name `A` (losing
# all but the last assignment), and `_LazyModule` referenced a
# `_import_structure` dict that was never defined.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access (standard transformers pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Fix: both module globals were previously bound to the same name `A`, while
# the formatter class below reads `logger` and `DEVICE_MAPPING`.
logger = get_logger()

# str(device) -> jaxlib Device cache. Module-level because
# `jaxlib.xla_extension.Device` is not picklable, so the mapping must not
# live on the (serialized) formatter instance.
DEVICE_MAPPING: Optional[dict] = None
class lowerCAmelCase_(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow-backed rows/columns/batches into jax arrays.

    Restored from a mangled version in which every helper method was named
    `__snake_case` (so later defs silently overwrote earlier ones) and most
    arguments were the undefined placeholder `_snake_case`. The method names
    below are the ones the code itself calls (`self._tensorize`,
    `self._consolidate`, `self.recursive_tensorize`, ...).
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> dict:
        """Map each available device's string identifier to the device object."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/dtype jax arrays into a single array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert one scalar/ndarray/PIL image to a jax array on `self.device`."""
        import jax
        import jax.numpy as jnp

        # Fix: the original tested `isinstance(value, (str, bytes, type(value)))`,
        # which is always True; `type(None)` is what lets None pass through.
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 136 | 1 |
def lowerCamelCase_ ( UpperCamelCase__ : int = 100 ) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first ``UpperCamelCase__`` natural numbers.
    """
    n = UpperCamelCase__
    # Use exact integer formulas (// instead of /) so the result stays correct
    # for arbitrarily large n — float division loses precision beyond 2**53.
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # Fix: the guard previously called `solution()`, a name that does not
    # exist in this module; call the function defined above instead.
    print(f"{lowerCamelCase_() = }")
| 469 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

# Fix: all four tables below were previously rebound to the same name `A__`,
# while the tokenizer class reads the canonical constant names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

# Maximum sequence lengths the positional embeddings support, per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class _a(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by HuggingFace `tokenizers`, using a
    custom Jieba pre-tokenizer for Chinese text.

    Fixes vs. the previous version: the base class was the undefined name
    `UpperCamelCase__` (it must be `PreTrainedTokenizerFast`), the class-level
    constants were misnamed, every method shared the name `UpperCAmelCase_`
    (so later defs overwrote earlier ones), and several statements bound
    throwaway locals instead of instance/backend state.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            # Rebuild the normalizer so explicit kwargs win over serialized state.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer is not picklable; swap in the stock
        # BERT pre-tokenizer for serialization.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Re-install the custom Jieba pre-tokenizer dropped in __getstate__.
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Custom pre-tokenizers cannot be serialized; temporarily restore the
        # stock BERT pre-tokenizer while saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 380 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build DonutImageProcessor instances in tests.

    Renamed to match the name the test class below instantiates; the previous
    `__init__` also repeated the parameter name `_A` for every argument, which
    is a SyntaxError — names restored from the attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default size intentionally non-square to exercise (height, width) order.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing a DonutImageProcessor from this config."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests DonutImageProcessor via the shared saving/loading mixin.

    Fixes vs. the previous version: the mixin base was the undefined name
    `snake_case_`, the attribute the mixin reads was misnamed
    `__magic_name__`, and every assertion referenced the undefined `A__`.
    """

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 720 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Unit tests for `PriorTransformer`.

    Fixes vs. the previous version: the mixin base was the undefined name
    `snake_case_`, the attributes the mixin reads were misnamed
    `__magic_name__`, every `.to(_A)` used an undefined placeholder instead of
    `torch_device`, and two test methods repeated the parameter name `_A`.
    """

    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Random (unseeded) inputs matching the dummy prior's config."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Deterministic inputs for reproducible output comparisons."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        # Fix: unpack the (model, loading_info) tuple into real names and pass
        # `output_loading_info=True` instead of an undefined placeholder.
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """Slow integration tests against the published Kandinsky 2.1 prior.

    Fixes vs. the previous version: the seed-input helper repeated the
    parameter name `_A` four times (a SyntaxError), `tearDown` was misnamed
    (so unittest never ran it), and the parameterized test discarded its
    (seed, expected_slice) arguments.
    """

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        """Deterministic inputs sized for the real prior checkpoint."""
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # Release GPU memory between integration tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 534 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a sandboxed subprocess and report pass/fail.

    Fixes: the original signature repeated `__snake_case` four times (a
    SyntaxError) and every function in this module shared the name
    `_snake_case`, so later defs overwrote earlier ones; names restored so
    `target=unsafe_execute` resolves.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    # Child died without reporting (e.g. hard kill above) -> count as timeout.
    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    """Exec `check_program` under I/O/time/syscall guards; append the outcome to `result`.

    Fix: the original saved `shutil.rmtree`/`os.rmdir`/`os.chdir` into
    throwaway locals and never restored them after `reliability_guard()`
    disabled them, and passed an undefined name to `exec`.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    """Raise TimeoutException if the guarded block runs longer than `seconds`.

    SIGALRM-based, so it only works in the main thread on Unix. Fix: the
    signal calls previously received the undefined placeholder `__snake_case`.
    """

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable for the duration.

    Fix: the redirectors previously received the undefined placeholder
    `__snake_case` instead of the write-only buffer.
    """
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    """Create a temp dir, chdir into it for the duration, then clean it up."""
    with tempfile.TemporaryDirectory() as dirname:
        # Fix: `chdir` previously received the undefined placeholder
        # `__snake_case` instead of the temp dir's name.
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    """Raised by `time_limit` when the guarded block exceeds its time budget.

    Fix: the base class was the undefined name `__lowercase`; it must derive
    from Exception (under the name the `except TimeoutException` clauses use).
    """

    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises OSError on any read, so sandboxed code cannot read stdin."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        # Report unreadable so io machinery never even attempts a read.
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    # `_stream` tells contextlib._RedirectStream which sys attribute to swap;
    # fix: the value was previously bound to a stray attribute name instead.
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to `root` ('.' is a no-op).

    Fix: the os.chdir calls previously received the undefined placeholder
    `__snake_case`, and the original never saved/restored the starting cwd.
    """
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive/system-level functionality before exec'ing untrusted code.

    WARNING: this is NOT a security sandbox; it only makes accidental damage
    from generated code less likely, and it is irreversible in this process.

    Fix: every line previously assigned None to a throwaway local
    (`_UpperCamelCase = None`), so nothing was actually disabled; the
    attribute targets are restored below.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 10 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A():
    """Patching "os.path.join" must affect every route to the attribute
    (os.path.join, path.join, join, and their renamed aliases), leave sibling
    attributes untouched, and restore everything once the patch exits.

    Fix: the mock string was assigned to a throwaway name (``__lowercase``)
    while the undefined name ``A__`` was passed to ``patch_submodule`` and
    ``mock`` was compared against below -> NameError.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def _A():
    """Patching a builtin that appears in the module's globals ("open") must
    swap it inside the patch and restore it afterwards.

    Fix: the mock string was bound to ``__lowercase`` while the undefined
    names ``A__``/``mock`` were actually used -> NameError.
    """
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def _A():
    """Patching an attribute of a module that is *not* imported by the target
    module ("pandas.read_csv") must be a silent no-op, not an error.

    Fix: the mock string was bound to ``__lowercase`` while the undefined name
    ``A__`` was passed to ``patch_submodule`` -> NameError.
    """
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def _A():
    """Patching a builtin absent from the module's globals ("len") must still
    take effect inside the patch and fall back to the real builtin after it.

    Fix: the mock string was bound to ``__lowercase`` while the undefined
    names ``A__``/``mock`` were actually used -> NameError.
    """
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def _A():
    """patch_submodule must also work via explicit start()/stop() instead of
    the context-manager protocol.

    Fix: the patcher object was bound to ``__lowercase`` while the undefined
    names ``patch``/``mock``/``A__`` were actually used -> NameError.
    """
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def _A():
    """Several simultaneous patches on the same module tree must compose in
    any nesting order and unwind cleanly.

    Fix: the three mock strings were all bound to ``__lowercase`` while the
    undefined names ``mock_join``/``mock_dirname``/``mock_rename``/``A__``
    were actually used -> NameError.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def _A():
    """Patching attributes that do not exist anywhere must be a silent no-op.

    Fix: the mock string was bound to ``__lowercase`` while the undefined name
    ``A__`` was passed to ``patch_submodule`` -> NameError.
    """
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 41 | 0 |
'''simple docstring'''
def lowerCAmelCase():
    """Project Euler 19: count Sundays that fell on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Starts from 6 Jan 1901 (the first Sunday of the period, since
    1 Jan 1901 was a Tuesday) and walks forward in 7-day steps, tracking
    the day-of-month by hand; whenever the walk lands on day 1, that first
    of the month is a Sunday.

    Returns:
        int: the number of such Sundays (171).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday in range
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        # Gregorian leap-year rule: divisible by 4 and not by 100, or by 400.
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                # February has 29 days in a leap year
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        # Exclude 1 Jan 2001, which is outside the requested century.
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


# Backward-compatible alias: the `__main__` guard below calls `solution()`,
# which was otherwise undefined in this file (NameError at script run time).
solution = lowerCAmelCase
if __name__ == "__main__":
    # Entry point: print the Project Euler 19 answer when run as a script.
    # fix: the guard called `solution()`, which is not defined in this file;
    # the counting function above is named `lowerCAmelCase`.
    print(lowerCAmelCase())
| 716 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __A(unittest.TestCase):
    """Unit tests for ``transformers.generation.DisjunctiveConstraint``.

    Fixes applied to the original block:
    - the undefined name ``A`` was used as the isinstance target, the expected
      exception class, and the assertTrue argument -> NameError at run time;
      restored to ``list`` / ``ValueError`` / the computed flag respectively;
    - all four test methods were named ``a__``, so the later definitions
      shadowed the earlier ones and *none* matched unittest's ``test_*``
      discovery pattern; restored descriptive ``test_*`` names.
    """

    def test_input_types(self):
        """The constraint accepts a list of int lists and rejects tensors."""
        constraints = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraints)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """One branch being a strict prefix of another is ambiguous -> raise."""
        constraints = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(constraints)  # fails here

    def test_example_progression(self):
        """Stepping 1 -> 2 -> 3 walks and completes the [1, 2, 3] branch."""
        constraints = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraints)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """Branches of unequal length, completion mid-branch, and reset()."""
        constraints = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraints)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 352 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.