| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure the extra kwargs are passed through to the decoder's language model on load
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_batch_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        expected_text = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), expected_text)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
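For orientation, this is how the processor under test is typically used outside the test suite; a minimal sketch, assuming the same LM-equipped checkpoint the slow test above relies on and using silence as stand-in audio:

import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = np.zeros(16_000, dtype=np.float32)  # one second of 16 kHz audio (stand-in)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()
# decode() runs pyctcdecode's beam search with the n-gram LM instead of plain argmax CTC.
transcription = processor.decode(logits[0]).text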
# ---- code_codestyle: 56 ----
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
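The scraper above hard-codes a Yahoo-internal CSS class, so it raises AttributeError the moment the markup changes; a slightly more defensive variant, reusing the imports above (the name stock_price_safe is a hypothetical helper, not part of the original):

def stock_price_safe(symbol: str = "AAPL") -> str | None:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    # Return None instead of raising when Yahoo changes its markup.
    return span.text if span else None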
# ---- style_context_codestyle: 249, label: 0 ----
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
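As a quick sanity check on solution: the first six primes are 2, 3, 5, 7, 11 and 13, and the published answer to Project Euler problem 7 (the 10001st prime) is 104743:

assert solution(6) == 13
assert solution(10_001) == 104_743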
# ---- code_codestyle: 706 ----
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when `num_picked` balls are drawn (Project Euler problem 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
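The one-liner above leans on linearity of expectation: each of the 7 colours is absent from a 20-ball draw with probability C(60, 20) / C(70, 20), so the expected number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)). A direct check of the arithmetic:

import math

p_missing = math.comb(60, 20) / math.comb(70, 20)
print(f"{7 * (1 - p_missing):.9f}")  # 6.818741802, matching solution(20)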
# ---- style_context_codestyle: 626, label: 0 ----
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max normalize `data` to the range [0, 1]."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Standardize `data` to zero mean and unit sample standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
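A short worked example of both helpers (the sample standard deviation of [5, 10, 15] is 5, so standardization maps it to [-1, 0, 1]):

data = [5.0, 10.0, 15.0]
print(normalization(data))    # [0.0, 0.5, 1.0]
print(standardization(data))  # [-1.0, 0.0, 1.0]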
# ---- code_codestyle: 305 ----
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in its `_import_structure` and in its
    TYPE_CHECKING block.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo define the same objects in both halves, raising otherwise."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Check every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
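To make the parsing concrete, here is what find_backend returns on a few representative lines (hypothetical inputs, runnable with the module above on the path):

print(find_backend("if not is_torch_available():"))  # "torch"
print(find_backend("if not is_torch_available() and not is_flax_available():"))  # "flax_and_torch"
print(find_backend("import os"))  # None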
# ---- style_context_codestyle: 305, label: 1 ----
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
# ---- code_codestyle: 705 ----
"""simple docstring"""
import numpy as np
import qiskit
def A ( snake_case :int = 8 , snake_case :int | None = None ) -> str:
__UpperCamelCase = np.random.default_rng(seed=snake_case )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
__UpperCamelCase = 6 * key_len
# Measurement basis for Alice's qubits.
__UpperCamelCase = rng.integers(2 , size=snake_case )
# The set of states Alice will prepare.
__UpperCamelCase = rng.integers(2 , size=snake_case )
# Measurement basis for Bob's qubits.
__UpperCamelCase = rng.integers(2 , size=snake_case )
# Quantum Circuit to simulate BB84
__UpperCamelCase = qiskit.QuantumCircuit(snake_case , name='BB84' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(snake_case ):
if alice_state[index] == 1:
bbaa_circ.x(snake_case )
if alice_basis[index] == 1:
bbaa_circ.h(snake_case )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(snake_case ):
if bob_basis[index] == 1:
bbaa_circ.h(snake_case )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__UpperCamelCase = qiskit.Aer.get_backend('aer_simulator' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
__UpperCamelCase = qiskit.execute(snake_case , snake_case , shots=1 , seed_simulator=snake_case )
# Returns the result of measurement.
__UpperCamelCase = job.result().get_counts(snake_case ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__UpperCamelCase = ''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
snake_case , snake_case , snake_case )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__UpperCamelCase = gen_key[:key_len] if len(snake_case ) >= key_len else gen_key.ljust(snake_case , '0' )
return key
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
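Because both the classical randomness and the simulator share the seed argument, the function is deterministic for a fixed seed, which is what the __main__ block above exploits; a small usage sketch:

key_a = bb84(key_len=16, seed=42)
key_b = bb84(key_len=16, seed=42)
assert key_a == key_b    # same seed, same key
assert len(key_a) == 16  # padded or truncated to the requested length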
# ---- style_context_codestyle: 293, label: 0 ----
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
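The pattern all three test classes exercise reduces to one forward pass through UNet2DModel; a minimal sketch with a randomly initialized model, using the config of the first test class above:

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    out = model(noise, torch.tensor([10])).sample  # same (1, 3, 32, 32) shape as the input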
# ---- code_codestyle: 300 ----
"""simple docstring"""
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , _snake_case : list[int] ) -> None:
SCREAMING_SNAKE_CASE__ = len(_snake_case )
SCREAMING_SNAKE_CASE__ = [0] * len_array
if len_array > 0:
SCREAMING_SNAKE_CASE__ = array[0]
for i in range(1 , _snake_case ):
SCREAMING_SNAKE_CASE__ = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase_ ( self : Union[str, Any] , _snake_case : int , _snake_case : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase_ ( self : Union[str, Any] , _snake_case : int ) -> bool:
SCREAMING_SNAKE_CASE__ = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(_snake_case )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
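A short worked example: for [1, 2, 3] the prefix sums are [1, 3, 6], so range queries take O(1) and contains_sum finds the subarray [2, 3]:

ps = PrefixSum([1, 2, 3])
assert ps.get_sum(0, 2) == 6   # 1 + 2 + 3
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(5)      # [2, 3] sums to 5
assert not ps.contains_sum(7)  # no contiguous subarray sums to 7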
# ---- style_context_codestyle: 159, label: 0 ----
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bf16_available,
is_bnb_available,
is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fp32,
convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
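Most of these names are re-exported at the package level as accelerate.utils; a small sketch of two of the operations listed above, assuming accelerate is installed:

import torch
from accelerate.utils import find_batch_size, send_to_device

batch = {"input_ids": torch.zeros(8, 128, dtype=torch.long), "labels": torch.zeros(8, dtype=torch.long)}
batch = send_to_device(batch, "cpu")  # recursively moves every tensor in the structure
print(find_batch_size(batch))         # 8, inferred from the first tensor found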
# ---- code_codestyle: 134 ----
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Optional[int] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
_UpperCamelCase : Tuple = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
_UpperCamelCase : int = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(a_ )
class a :
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
if titles is None and texts is None:
return super().__call__(
_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
elif titles is None or texts is None:
lowercase = titles if texts is None else texts
return super().__call__(
_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
lowercase = titles if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [titles]
lowercase = texts if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [texts]
lowercase = len(_lowerCamelCase )
lowercase = questions if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [questions] * n_passages
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
                F'There should be as many titles as texts but got {len(_lowerCamelCase )} titles and {len(_lowerCamelCase )} texts.' )
lowercase = super().__call__(_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase )['input_ids']
lowercase = super().__call__(_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase )['input_ids']
lowercase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCamelCase , _lowerCamelCase )
]
}
if return_attention_mask is not False:
lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase = attention_mask
return self.pad(_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1_6 , _lowerCamelCase = 6_4 , _lowerCamelCase = 4 , ):
lowercase = reader_input['input_ids']
lowercase , lowercase , lowercase = reader_output[:3]
lowercase = len(_lowerCamelCase )
lowercase = sorted(range(_lowerCamelCase ) , reverse=_lowerCamelCase , key=relevance_logits.__getitem__ )
lowercase = []
for doc_id in sorted_docs:
lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase = sequence_ids.index(self.pad_token_id )
else:
lowercase = len(_lowerCamelCase )
lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowerCamelCase , top_spans=_lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowerCamelCase , start_index=_lowerCamelCase , end_index=_lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = []
for start_index, start_score in enumerate(_lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowercase = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _lowerCamelCase[1] , reverse=_lowerCamelCase )
lowercase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
lowercase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class a ( a_, a_ ):
UpperCAmelCase_ : Union[str, Any] =VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[Any] =READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : Optional[int] =["input_ids", "attention_mask"]
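# A minimal usage sketch of the reader interface defined above, assuming the
# public transformers DPR API that this file mirrors; the checkpoint name is
# taken from the pretrained maps earlier in the file.
from transformers import DPRReader, DPRReaderTokenizer

reader_tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
reader_model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded = reader_tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = reader_model(**encoded)
# decode_best_spans ranks passages by relevance and extracts the top answer spans
best_spans = reader_tokenizer.decode_best_spans(encoded, outputs)
print(best_spans[0].text)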
| 134
| 1
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
SCREAMING_SNAKE_CASE__ = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = "hopper-medium-v2"
SCREAMING_SNAKE_CASE__ = gym.make(env_name)
SCREAMING_SNAKE_CASE__ = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE__ = env.reset()
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1_000
SCREAMING_SNAKE_CASE__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE__ = pipeline(obs, planning_horizon=32)
# execute action in environment
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = env.step(denorm_actions)
SCREAMING_SNAKE_CASE__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE__ = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
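# A condensed sketch of the planning loop above, assuming the
# diffusers.experimental ValueGuidedRLPipeline API: the pipeline plans over
# `planning_horizon` environment steps and returns a denormalized action.
#
#     env = gym.make("hopper-medium-v2")
#     pipeline = ValueGuidedRLPipeline.from_pretrained(
#         "bglick13/hopper-medium-v2-value-function-hor32", env=env
#     )
#     obs = env.reset()
#     denorm_actions = pipeline(obs, planning_horizon=32)
#     obs, reward, terminal, info = env.step(denorm_actions)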
| 532
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
    def _snake_case ( self ) -> Any:
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model.to(torch_device )
        from datasets import load_dataset

        dataset = load_dataset("""nielsr/rvlcdip-demo""" )
        image = dataset["""train"""][0]["""image"""].convert("""RGB""" )
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
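# The same inference is also available through the high-level pipeline API; a
# short sketch using the checkpoint exercised by the test above.
from transformers import pipeline

document_classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
# document_classifier(pil_image) -> [{"label": ..., "score": ...}, ...]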
| 532
| 1
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = FlaxAutoencoderKL
@property
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (32, 32)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = jax.random.uniform(UpperCamelCase__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
SCREAMING_SNAKE_CASE = self.dummy_input
return init_dict, inputs_dict
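# A minimal sketch of exercising the module under test with the dummy config
# above, using the standard Flax init/apply pattern (the exact call sequence of
# the common test mixin is assumed, since it is not shown in this file).
import jax
from diffusers import FlaxAutoencoderKL

vae = FlaxAutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
rng = jax.random.PRNGKey(0)
sample = jax.random.uniform(rng, (4, 3, 32, 32))
variables = vae.init(rng, sample)        # standard Flax linen initialization
reconstruction = vae.apply(variables, sample)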
| 700
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__UpperCamelCase = {'''facebook/blenderbot-3B''': 128}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : int = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = BlenderbotTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> str:
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , lowerCAmelCase__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**lowerCAmelCase__ )
setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
    @__A.setter
def __A ( self , lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else value
SCREAMING_SNAKE_CASE = value
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def __A ( self , lowerCAmelCase__ ) -> List[int]:
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = ' '.join(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
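# Typical end-user flow for the fast tokenizer above (the checkpoint name comes
# from the pretrained maps earlier in this file).
from transformers import BlenderbotTokenizerFast

tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
encoded = tokenizer(" Hello, how are you?", return_tensors="pt")
print(tokenizer.decode(encoded["input_ids"][0]))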
| 327
| 0
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
'''simple docstring'''
def __init__( self ,__a ,__a=13 ,__a=7 ,__a=True ,__a=True ,__a=True ,__a=True ,__a=True ,__a=False ,__a=False ,__a=False ,__a=2 ,__a=99 ,__a=0 ,__a=32 ,__a=5 ,__a=4 ,__a=0.1 ,__a=0.1 ,__a=512 ,__a=2 ,__a=0.02 ,__a=2 ,__a=4 ,__a="last" ,__a=True ,__a=None ,__a=0 ,) -> List[Any]:
snake_case : List[Any] = parent
snake_case : List[str] = batch_size
snake_case : Optional[int] = seq_length
snake_case : List[str] = is_training
snake_case : Tuple = use_input_lengths
snake_case : Union[str, Any] = use_token_type_ids
snake_case : Tuple = use_labels
snake_case : List[str] = gelu_activation
snake_case : Union[str, Any] = sinusoidal_embeddings
snake_case : Optional[int] = causal
snake_case : int = asm
snake_case : List[str] = n_langs
snake_case : Tuple = vocab_size
snake_case : Any = n_special
snake_case : Union[str, Any] = hidden_size
snake_case : Optional[Any] = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : Any = hidden_dropout_prob
snake_case : Dict = attention_probs_dropout_prob
snake_case : Optional[Any] = max_position_embeddings
snake_case : List[Any] = type_sequence_label_size
snake_case : str = initializer_range
snake_case : Dict = num_labels
snake_case : Optional[int] = num_choices
snake_case : Optional[Any] = summary_type
snake_case : Dict = use_proj
snake_case : Tuple = scope
snake_case : List[str] = bos_token_id
def snake_case_ ( self ) -> Optional[Any]:
snake_case : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Union[str, Any] = None
if self.use_input_lengths:
snake_case : Any = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case : List[str] = None
if self.use_token_type_ids:
snake_case : str = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
snake_case : Union[str, Any] = None
snake_case : Tuple = None
snake_case : List[Any] = None
if self.use_labels:
snake_case : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case : Union[str, Any] = ids_tensor([self.batch_size] ,2 ).float()
snake_case : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case : Optional[int] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case_ ( self ) -> List[str]:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> int:
snake_case : List[Any] = XLMModel(config=__a )
model.to(__a )
model.eval()
snake_case : Tuple = model(__a ,lengths=__a ,langs=__a )
snake_case : str = model(__a ,langs=__a )
snake_case : List[str] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> List[Any]:
snake_case : Union[str, Any] = XLMWithLMHeadModel(__a )
model.to(__a )
model.eval()
snake_case : List[str] = model(__a ,token_type_ids=__a ,labels=__a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> Dict:
snake_case : Optional[Any] = XLMForQuestionAnsweringSimple(__a )
model.to(__a )
model.eval()
snake_case : int = model(__a )
snake_case : Tuple = model(__a ,start_positions=__a ,end_positions=__a )
snake_case : str = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> List[Any]:
snake_case : Dict = XLMForQuestionAnswering(__a )
model.to(__a )
model.eval()
snake_case : str = model(__a )
        result_with_labels = model(
            __a ,start_positions=__a ,end_positions=__a ,cls_index=__a ,is_impossible=__a ,p_mask=__a ,)
        result_with_labels = model(
            __a ,start_positions=__a ,end_positions=__a ,cls_index=__a ,is_impossible=__a ,)
        (total_loss , ) = result_with_labels.to_tuple()
        result_with_labels = model(__a ,start_positions=__a ,end_positions=__a )
        (total_loss , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> Tuple:
snake_case : Optional[Any] = XLMForSequenceClassification(__a )
model.to(__a )
model.eval()
snake_case : int = model(__a )
snake_case : Tuple = model(__a ,labels=__a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> Optional[Any]:
snake_case : Tuple = self.num_labels
snake_case : Optional[int] = XLMForTokenClassification(__a )
model.to(__a )
model.eval()
snake_case : str = model(__a ,attention_mask=__a ,labels=__a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> Any:
snake_case : Tuple = self.num_choices
snake_case : List[Any] = XLMForMultipleChoice(config=__a )
model.to(__a )
model.eval()
snake_case : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case : Any = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case : Tuple = model(
__a ,attention_mask=__a ,token_type_ids=__a ,labels=__a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def snake_case_ ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
        return config, inputs_dict
@require_torch
class _a (a__, a__, a__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : int = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCAmelCase_ : Optional[Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case_ ( self ,__a ,__a ,__a=False ) -> Tuple:
snake_case : Dict = super()._prepare_for_class(__a ,__a ,return_labels=__a )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case : Optional[int] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__a )
snake_case : Dict = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__a )
return inputs_dict
def snake_case_ ( self ) -> List[Any]:
snake_case : Any = XLMModelTester(self )
snake_case : Any = ConfigTester(self ,config_class=__a ,emb_dim=37 )
def snake_case_ ( self ) -> int:
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Any:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__a )
def snake_case_ ( self ) -> Any:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__a )
def snake_case_ ( self ) -> str:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__a )
def snake_case_ ( self ) -> Dict:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__a )
def snake_case_ ( self ) -> Union[str, Any]:
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__a )
def snake_case_ ( self ) -> int:
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__a )
def snake_case_ ( self ) -> Any:
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__a )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a=False ,__a=1 ) -> int:
self.assertIsInstance(__a ,__a )
self.assertListEqual(
[isinstance(__a ,__a ) for iter_attentions in attentions] ,[True] * len(__a ) )
self.assertEqual(len(__a ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__a ):
# adds PAD dummy token
snake_case : str = min_length + idx + 1
snake_case : List[Any] = min_length + idx + 1
snake_case : int = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(__a ) )
def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a=False ,__a=1 ) -> str:
self.assertIsInstance(__a ,__a )
self.assertListEqual(
[isinstance(__a ,__a ) for iter_hidden_states in hidden_states] ,[True] * len(__a ) ,)
self.assertEqual(len(__a ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__a ):
# adds PAD dummy token
snake_case : List[str] = min_length + idx + 1
snake_case : Optional[int] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(__a ) ,)
pass
@slow
def snake_case_ ( self ) -> Union[str, Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = XLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_ ( self ) -> List[str]:
snake_case : Tuple = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(__a )
snake_case : Union[str, Any] = torch.tensor([[14, 447]] ,dtype=torch.long ,device=__a ) # the president
snake_case : Optional[Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case : Dict = model.generate(__a ,do_sample=__a )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,__a )
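# Stand-alone version of the integration check above; the checkpoint and the
# prompt ids ("the president") are taken directly from the test.
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]], dtype=torch.long)
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())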
| 116
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=3_0522, type=int)
    args = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
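# The counting core in isolation: accumulate per-token-id frequencies, then
# densify them into a vocab-sized list (toy data stands in for the pickle).
from collections import Counter

toy_data = [[101, 2023, 102], [101, 2023, 2003, 102]]
toy_counter = Counter()
for ids in toy_data:
    toy_counter.update(ids)
toy_counts = [0] * 30522
for token_id, freq in toy_counter.items():
    toy_counts[token_id] = freq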
| 116
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCamelCase = logging.get_logger(__name__)
class __a ( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['pixel_values']
def __init__( self : Optional[int] , lowercase__ : bool = True , lowercase__ : Optional[Dict[str, int]] = None , lowercase__ : PILImageResampling = PILImageResampling.BILINEAR , lowercase__ : bool = True , lowercase__ : Dict[str, int] = None , lowercase__ : bool = True , lowercase__ : Union[int, float] = 1 / 2_55 , lowercase__ : bool = True , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , **lowercase__ : List[str] , ) ->None:
"""simple docstring"""
super().__init__(**__UpperCamelCase)
_lowercase = size if size is not None else {"""shortest_edge""": 2_56}
_lowercase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
_lowercase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_lowercase = get_size_dict(__UpperCamelCase , param_name="""crop_size""")
_lowercase = do_resize
_lowercase = size
_lowercase = resample
_lowercase = do_center_crop
_lowercase = crop_size
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_normalize
_lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self : List[str] , lowercase__ : np.ndarray , lowercase__ : Dict[str, int] , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : List[str] , ) ->np.ndarray:
"""simple docstring"""
_lowercase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
_lowercase = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase)
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
def _UpperCAmelCase ( self : Tuple , lowercase__ : np.ndarray , lowercase__ : Dict[str, int] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Tuple , ) ->np.ndarray:
"""simple docstring"""
_lowercase = get_size_dict(__UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase)
def _UpperCAmelCase ( self : Union[str, Any] , lowercase__ : np.ndarray , lowercase__ : float , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Optional[Any]) ->np.ndarray:
"""simple docstring"""
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
def _UpperCAmelCase ( self : Optional[int] , lowercase__ : np.ndarray , lowercase__ : Union[float, List[float]] , lowercase__ : Union[float, List[float]] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : List[str] , ) ->np.ndarray:
"""simple docstring"""
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
def _UpperCAmelCase ( self : Union[str, Any] , lowercase__ : ImageInput , lowercase__ : Optional[bool] = None , lowercase__ : Dict[str, int] = None , lowercase__ : PILImageResampling = None , lowercase__ : bool = None , lowercase__ : Dict[str, int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[float] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase__ : Optional[int] , ) ->List[Any]:
"""simple docstring"""
_lowercase = do_resize if do_resize is not None else self.do_resize
_lowercase = size if size is not None else self.size
_lowercase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
_lowercase = resample if resample is not None else self.resample
_lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase = crop_size if crop_size is not None else self.crop_size
_lowercase = get_size_dict(__UpperCamelCase , param_name="""crop_size""")
_lowercase = do_rescale if do_rescale is not None else self.do_rescale
_lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase = do_normalize if do_normalize is not None else self.do_normalize
_lowercase = image_mean if image_mean is not None else self.image_mean
_lowercase = image_std if image_std is not None else self.image_std
_lowercase = make_list_of_images(__UpperCamelCase)
if not valid_images(__UpperCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_lowercase = [to_numpy_array(__UpperCamelCase) for image in images]
if do_resize:
_lowercase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase) for image in images]
if do_center_crop:
_lowercase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase) for image in images]
if do_rescale:
_lowercase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase) for image in images]
if do_normalize:
_lowercase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase) for image in images]
_lowercase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase) for image in images]
_lowercase = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
def _UpperCAmelCase ( self : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Tuple] = None) ->int:
"""simple docstring"""
_lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCamelCase) != len(__UpperCamelCase):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""")
if is_torch_tensor(__UpperCamelCase):
_lowercase = target_sizes.numpy()
_lowercase = []
for idx in range(len(__UpperCamelCase)):
_lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__UpperCamelCase)
_lowercase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__UpperCamelCase)
else:
_lowercase = logits.argmax(dim=1)
_lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
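# Driving an image processor with the interface above; the concrete class name
# is mangled in this dump, so AutoImageProcessor with a checkpoint that ships
# the same resize -> center-crop -> rescale -> normalize pipeline stands in.
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
image = Image.fromarray(np.uint8(np.random.rand(480, 640, 3) * 255))
batch = processor(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])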
| 719
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : int = 'efficientnet'
def __init__( self : Optional[int] , lowercase__ : int = 3 , lowercase__ : int = 6_00 , lowercase__ : float = 2.0 , lowercase__ : float = 3.1 , lowercase__ : int = 8 , lowercase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowercase__ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowercase__ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowercase__ : List[int] = [] , lowercase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowercase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowercase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowercase__ : float = 0.25 , lowercase__ : str = "swish" , lowercase__ : int = 25_60 , lowercase__ : str = "mean" , lowercase__ : float = 0.02 , lowercase__ : float = 0.001 , lowercase__ : float = 0.99 , lowercase__ : float = 0.5 , lowercase__ : float = 0.2 , **lowercase__ : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**lowercase__)
_lowercase = num_channels
_lowercase = image_size
_lowercase = width_coefficient
_lowercase = depth_coefficient
_lowercase = depth_divisor
_lowercase = kernel_sizes
_lowercase = in_channels
_lowercase = out_channels
_lowercase = depthwise_padding
_lowercase = strides
_lowercase = num_block_repeats
_lowercase = expand_ratios
_lowercase = squeeze_expansion_ratio
_lowercase = hidden_act
_lowercase = hidden_dim
_lowercase = pooling_type
_lowercase = initializer_range
_lowercase = batch_norm_eps
_lowercase = batch_norm_momentum
_lowercase = dropout_rate
_lowercase = drop_connect_rate
_lowercase = sum(lowercase__) * 4
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : List[str] = version.parse('1.11' )
@property
def _UpperCAmelCase ( self : Union[str, Any]) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _UpperCAmelCase ( self : str) ->float:
"""simple docstring"""
return 1e-5
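# Building a config like the one above; this class mirrors the public
# transformers EfficientNetConfig, so the names below are assumed from that API.
from transformers import EfficientNetConfig, EfficientNetModel

config = EfficientNetConfig(image_size=600, width_coefficient=2.0, depth_coefficient=3.1)
model = EfficientNetModel(config)  # randomly initialized b7-style backbone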
| 572
| 0
|
def SCREAMING_SNAKE_CASE__ ( separator, separated ) -> str:
    """simple docstring"""
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
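    # Example behavior of the helper above: the trailing separator added by the
    # loop is removed by the final strip.
    print(SCREAMING_SNAKE_CASE__("-", ["apple", "banana", "cherry"]))  # apple-banana-cherry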
| 387
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class lowerCamelCase_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self : str ,initial_learning_rate : float ,decay_schedule_fn : Callable ,warmup_steps : int ,power : float = 1.0 ,name : str = None ,):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self : int ,step : int ):
        '''simple docstring'''
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step ,tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps ,tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done ,self.power )
            return tf.cond(
                global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=name ,)
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def SCREAMING_SNAKE_CASE__ ( init_lr, num_train_steps, num_warmup_steps, min_lr_ratio = 0.0, adam_beta1 = 0.9, adam_beta2 = 0.999, adam_epsilon = 1e-8, adam_clipnorm = None, adam_global_clipnorm = None, weight_decay_rate = 0.0, power = 1.0, include_in_weight_decay = None, ) -> List[str]:
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class lowerCamelCase_ ( a_ ):
    def __init__( self : Any ,learning_rate : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 ,beta_1 : float = 0.9 ,beta_2 : float = 0.999 ,epsilon : float = 1e-7 ,amsgrad : bool = False ,weight_decay_rate : float = 0.0 ,include_in_weight_decay : Optional[List[str]] = None ,exclude_from_weight_decay : Optional[List[str]] = None ,name : str = "AdamWeightDecay" ,**kwargs : Optional[Any] ,):
        '''simple docstring'''
        super().__init__(learning_rate ,beta_1 ,beta_2 ,epsilon ,amsgrad ,name ,**kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str ,__lowerCamelCase : Any ):
'''simple docstring'''
        custom_objects = {'''WarmUp''': WarmUp}
        return super().from_config(__lowerCamelCase ,custom_objects=custom_objects )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super(__lowerCamelCase ,self )._prepare_local(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
a = tf.constant(
self.weight_decay_rate ,name='''adam_weight_decay_rate''' )
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ):
'''simple docstring'''
a = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] ,use_locking=self._use_locking ,)
return tf.no_op()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : Dict=None ,**__lowerCamelCase : int ):
'''simple docstring'''
a , a = list(zip(*__lowerCamelCase ) )
return super(__lowerCamelCase ,self ).apply_gradients(zip(__lowerCamelCase ,__lowerCamelCase ) ,name=__lowerCamelCase ,**__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Any ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Dict ):
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
a = apply_state or {}
a = apply_state.get((var_device, var_dtype) )
if coefficients is None:
a = self._fallback_apply_state(__lowerCamelCase ,__lowerCamelCase )
a = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple=None ):
'''simple docstring'''
a , a = self._get_lr(var.device ,var.dtype.base_dtype ,__lowerCamelCase )
a = self._decay_weights_op(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(__lowerCamelCase ,self )._resource_apply_dense(__lowerCamelCase ,__lowerCamelCase ,**__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict ,__lowerCamelCase : List[str]=None ):
'''simple docstring'''
a , a = self._get_lr(var.device ,var.dtype.base_dtype ,__lowerCamelCase )
a = self._decay_weights_op(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(__lowerCamelCase ,self )._resource_apply_sparse(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,**__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
a = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : int ):
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
                if re.search(r ,__lowerCamelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
                if re.search(r ,__lowerCamelCase ) is not None:
return False
return True
class lowerCamelCase_ ( a_ ):
def __init__( self : Optional[int] ):
'''simple docstring'''
a = []
a = None
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 ,dtype=tf.int64 ) ,trainable=False ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
return self._accum_steps.value()
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : str ,__lowerCamelCase : List[str] ):
'''simple docstring'''
if not self._gradients:
a = self.step # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) ,trainable=False ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
                    if gradient is not None
                    else gradient
                    for gradient in __lowerCamelCase
                ] )
if len(__lowerCamelCase ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__lowerCamelCase )}""" )
for accum_gradient, gradient in zip(self._gradients ,__lowerCamelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__lowerCamelCase )
self._accum_steps.assign_add(1 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
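# The factory above mirrors transformers' create_optimizer: linear warmup into
# a polynomial decay schedule, feeding AdamWeightDecay. Equivalent public API:
from transformers import create_optimizer

optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=500,
    weight_decay_rate=0.01,
)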
| 387
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta_xl'''] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
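# The lazy-module pattern above defers the heavy torch imports until an
# attribute is first touched. User-side effect (assumes torch is installed):
import transformers

config = transformers.XLMRobertaXLConfig()        # triggers the lazy import
model = transformers.XLMRobertaXLModel(config)    # torch-backed class, large!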
| 713
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
lowercase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
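# Shape note (illustrative addition): for hidden_size H, timm stores the fused
# qkv projection as a single (3*H, H) matrix; rows [0:H], [H:2H] and [2H:3H]
# hold the query, key and value weights respectively, which is exactly how they
# are sliced above.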
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny'):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small'):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small'):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base'):
            pass
        elif vit_name[4:].startswith('large'):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge'):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
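# Example invocation (illustrative addition; the script filename is an
# assumption):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224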
| 565
| 0
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
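# Worked example (illustrative addition): positive inputs pass through
# unchanged, while negative inputs saturate toward -alpha.
#   exponential_linear_unit(np.array([2.0, -3.0]), alpha=1.0)  # ≈ [2.0, -0.9502]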
if __name__ == "__main__":
import doctest
doctest.testmod()
| 395
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
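# Illustrative check (assumption: outside SageMaker this returns False unless
# the SM_* environment variables are set and `smdistributed` is installed):
#
#   os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#   os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
#   print(is_sagemaker_model_parallel_available())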
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"})

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 395
| 1
|
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
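# Sanity check (illustrative addition): the segmented sieve agrees with the
# primes up to a small bound.
# assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]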
| 46
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """Configuration class for UniSpeechSat models."""

    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
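# Usage sketch (illustrative addition): with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples raw audio by 320x.
#   config = UniSpeechSatConfig()
#   print(config.inputs_to_logits_ratio)  # 320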
| 46
| 1
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 191
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191
| 1
|
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
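# Worked example (illustrative addition): there are 5 ways to climb 4 steps
# taking 1 or 2 steps at a time: 1+1+1+1, 1+1+2, 1+2+1, 2+1+1 and 2+2.
# assert climb_stairs(4) == 5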
| 708
|
def solution(n: int = 1000) -> int:
    """Sum 2 * a * ((a - 1) // 2) over all a from 3 to n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 300
| 0
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
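# Example usage (illustrative addition; "model.onnx" is a hypothetical path):
# write an optimized copy with duplicate initializers removed next to the
# original file.
# optimized_path = remove_dup_initializers("model.onnx")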
| 36
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
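# Usage sketch (illustrative addition): any scheduler re-exported above can be
# instantiated directly once its optional dependencies are installed, e.g.:
#   from diffusers.schedulers import DDIMScheduler
#   scheduler = DDIMScheduler(num_train_timesteps=1000)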
| 36
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class SimpleImageProcessor(BaseImageProcessor):
    """An ImageNet-style image processor: resize, center crop, rescale and normalize.

    Note: the class name here is a generic stand-in; the original obfuscated name
    did not identify a specific model family.
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
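# Usage sketch (illustrative addition; "cat.png" is a hypothetical file):
#   from PIL import Image
#   processor = SimpleImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])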
| 76
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the user-profile dict out of an embedded <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Scrape basic profile information for a public Instagram account."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self):
        """Fetch the profile page and extract the user data dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Smoke-test the scraper against a known public account."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : int = InstagramUser('''github''')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 36
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 413
| 0
|
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
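# Worked example (illustrative addition): for [2, 3, -2, 4] the best contiguous
# subarray is [2, 3] with product 6.
# assert max_product_subarray([2, 3, -2, 4]) == 6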
| 721
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='hybrid')

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder')
    if ".." in name:
        name = name.replace('..', '.')
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv')
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit')
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm')
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # model_name is currently unused; the hub repo id below is hard-coded.
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
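# Example invocation (illustrative addition; the script filename and the local
# checkpoint path are assumptions):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large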
| 130
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
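# Usage sketch (illustrative addition): align the template with a dataset's
# features so the label schema picks up the concrete ClassLabel.
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)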
| 359
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be tiled with blocks of
    minimum length 3, each separated by at least one empty square."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We verify the converted model on an image from the COCO validation set.
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
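# Hedged usage note (script filename assumed): a typical invocation is
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16
# Conversion needs network access for torch.hub (and for the hub-hosted ImageNet
# label file when converting a model with a classification head).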
| 431
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Stochastic sampling from Karras et al. [1] tailored to variance-expanding (VE) models.
    """

    # add type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
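# Hedged usage sketch (the checkpoint id is hypothetical; any hub repo that bundles
# a UNet2DModel with a KarrasVeScheduler would work):
#   pipe = KarrasVePipeline.from_pretrained("some-org/karras-ve-checkpoint")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]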
| 431
| 1
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original BLIP weights into the transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    # the parser defines only --pytorch_dump_folder_path and --config_path, so those
    # are the only two arguments the conversion function receives
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
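# Hedged usage note (script filename assumed; requires the salesforce/BLIP repo on PYTHONPATH):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base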
| 23
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer
    category; this way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels, then take the negative log-likelihood
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        # pad each sequence to max_length and build the matching attention mask
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
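# Hedged sketch of the resulting schedule: linear warmup from `init_lr` to `lr` over
# `warmup_steps`, then linear decay towards ~0 for the remaining steps, e.g.
#   tx, lr_fn = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20000, num_train_steps=100000, weight_decay=0.0095)
#   lr_fn(0)  # ~0.0, rising towards 3e-5 at step 20000, then decaying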
| 59
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
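# Hedged usage note: this module backs the `accelerate config` subcommand, e.g.
#   accelerate config --config_file ./default_config.yaml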
| 709
|
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings.

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    >>> longest_common_subsequence("physics", "smartphone")
    (2, 'ph')
    >>> longest_common_subsequence("computer", "food")
    (1, 'o')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0

            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack through the dp table to recover one longest subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 439
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
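# Hedged usage sketch (the checkpoint id is a well-known CLIP model on the hub;
# `pil_image` is assumed to be a PIL.Image.Image already in scope):
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=pil_image, return_tensors="pt")  # batch["pixel_values"]: (1, 3, 224, 224)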
| 100
|
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesised infix expression with single-digit operands
    using Dijkstra's two-stack algorithm.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
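# Hedged note: the algorithm assumes a fully parenthesised expression with
# single-digit operands, e.g. dijkstras_two_stack_algorithm("((1 + 2) * 3)") == 9.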
| 368
| 0
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match for the target string is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 578
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def __lowercase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowercase ( self ) -> Any:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowercase ( self ) -> str:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 578
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo linear multistep scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True):
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler')
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # linear multistep (Adams-Bashforth) combination of the stored values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
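# --- Illustration ---
# A plain-float sketch (assumption: scalars stand in for tensors) of the
# Adams-Bashforth blending performed in `step` above, so the 1st- to 4th-order
# coefficient sets can be sanity-checked in isolation.
def blend_ets(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

assert blend_ets([1.0, 2.0]) == 2.5  # second-order extrapolation: 2 * 2.0 - 1.5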
| 430
| 0
|
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    """simple docstring"""
    # if we are late three days in a row, or absent twice in total,
    # this attendance record earns no prize strings
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    """simple docstring"""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
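# Usage sketch: the Project Euler problem statement reports 43 prize strings
# over a four-day period, and the published 30-day answer is 1918080160.
assert solution(4) == 43
assert solution(30) == 1918080160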
| 79
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )

    @classmethod
    def pip_install(cls):
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase):
    name = '''optuna'''

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = '''ray'''
    pip_package = '''\'ray[tune]\''''

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = '''sigopt'''

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = '''wandb'''

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """simple docstring"""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        """No hyperparameter search backend available.\n"""
        + """\n""".join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
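# --- Illustration ---
# A hedged sketch of how a caller (e.g. Trainer.hyperparameter_search) uses the
# registry above: resolve the default backend name, instantiate the backend,
# make sure it is installed, then dispatch. `trainer`, `n_trials`, and
# `direction` are hypothetical stand-ins for the values a Trainer would pass.
def run_search(trainer, n_trials: int, direction: str = "minimize", **kwargs):
    backend_name = default_hp_search_backend()  # first available backend
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
    backend.ensure_available()  # raises with install instructions if missing
    return backend.run(trainer, n_trials, direction, **kwargs)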
| 79
| 1
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a (lowercase__ : List[Any] , lowercase__ : List[str] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
# Load configuration defined in the metadata file
with open(lowercase__ ) as metadata_file:
__snake_case = json.load(lowercase__ )
__snake_case = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__snake_case = torch.load(lowercase__ , map_location='cpu' )
# Load the entity vocab file
__snake_case = load_entity_vocab(lowercase__ )
__snake_case = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case = AddedToken('<ent>' , lstrip=lowercase__ , rstrip=lowercase__ )
__snake_case = AddedToken('<ent2>' , lstrip=lowercase__ , rstrip=lowercase__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
__snake_case = LukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
__snake_case = state_dict['embeddings.word_embeddings.weight']
__snake_case = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
__snake_case = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
__snake_case = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case = f'encoder.layer.{layer_index}.attention.self.'
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case = state_dict['entity_embeddings.entity_embeddings.weight']
__snake_case = entity_emb[entity_vocab['[MASK]']]
__snake_case = LukeModel(config=lowercase__ ).eval()
__snake_case , __snake_case = model.load_state_dict(lowercase__ , strict=lowercase__ )
if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'Missing keys {", ".join(lowercase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
__snake_case = LukeTokenizer.from_pretrained(lowercase__ , task='entity_classification' )
__snake_case = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
__snake_case = (3_9, 4_2)
__snake_case = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors='pt' )
__snake_case = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
__snake_case = torch.Size((1, 4_2, 1_0_2_4) )
__snake_case = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
__snake_case = torch.Size((1, 4_2, 7_6_8) )
__snake_case = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__snake_case = torch.Size((1, 1, 1_0_2_4) )
__snake_case = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
__snake_case = torch.Size((1, 1, 7_6_8) )
__snake_case = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def load_entity_vocab(entity_vocab_path) -> List[str]:
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_a : Optional[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
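# --- Illustration ---
# A toy-tensor sketch of the special-token embedding initialization above: the
# rows for the new "<ent>"/"<ent2>" tokens are seeded from the embeddings of
# "@" and "#" and appended to the word-embedding matrix. All sizes here are
# hypothetical, chosen only to make the shapes easy to follow.
word_emb = torch.zeros(4, 2)               # pretend (vocab_size=4, hidden=2)
word_emb[1] = torch.tensor([1.0, 2.0])     # pretend row 1 is the "@" embedding
word_emb[2] = torch.tensor([3.0, 4.0])     # pretend row 2 is the "#" embedding
extended = torch.cat([word_emb, word_emb[1].unsqueeze(0), word_emb[2].unsqueeze(0)])
assert extended.shape == (4 + 2, 2)        # vocab grew by the two new tokens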
| 56
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
A : int = TypeVar("""T""")
class lowerCAmelCase_ ( Generic[T] ):
def __init__( self : int, _snake_case : bool = True ):
'''simple docstring'''
snake_case : dict[T, list[T]] ={} # dictionary of lists
snake_case : Optional[int] =directed
def __snake_case ( self : Any, _snake_case : T, _snake_case : T ):
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
self.adj_list[destination_vertex].append(_snake_case )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
snake_case : Any =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_snake_case )
snake_case : int =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
snake_case : Union[str, Any] =[destination_vertex]
snake_case : str =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
snake_case : Optional[Any] =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
snake_case : Any =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
snake_case : int =[destination_vertex]
snake_case : Optional[Any] =[]
return self
def __repr__( self : int ):
'''simple docstring'''
return pformat(self.adj_list )
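# --- Illustration ---
# A minimal self-contained sketch of the undirected bookkeeping above: using
# dict.setdefault collapses the four explicit membership cases into two lines.
def add_undirected_edge(adj, u, v):
    adj.setdefault(u, []).append(v)
    adj.setdefault(v, []).append(u)
    return adj

adj = {}
add_undirected_edge(adj, 1, 2)
add_undirected_edge(adj, 2, 3)
assert adj == {1: [2], 2: [1, 3], 3: [2]}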
| 349
| 0
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase__ :List[str] = sys.version_info >= (3, 10)
def UpperCamelCase ( lowerCAmelCase__=None , lowerCAmelCase__=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowerCAmelCase__ )
@dataclass
class lowercase :
lowercase_ : int
lowercase_ : float
lowercase_ : str
lowercase_ : bool
@dataclass
class lowercase :
lowercase_ : int =42
lowercase_ : str =field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class lowercase :
lowercase_ : bool =False
lowercase_ : bool =True
lowercase_ : Optional[bool] =None
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] ='''titi'''
lowercase_ : Union[str, Any] ='''toto'''
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str ='''titi'''
lowercase_ : int ='''toto'''
lowercase_ : Union[str, Any] =42
@dataclass
class lowercase :
lowercase_ : BasicEnum ="toto"
def A__ ( self):
lowercase = BasicEnum(self.foo)
@dataclass
class lowercase :
lowercase_ : MixedTypeEnum ="toto"
def A__ ( self):
lowercase = MixedTypeEnum(self.foo)
@dataclass
class lowercase :
lowercase_ : Optional[int] =None
lowercase_ : Optional[float] =field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''help message'''} )
lowercase_ : Optional[str] =None
lowercase_ : Optional[List[str]] =list_field(default=[] )
lowercase_ : Optional[List[int]] =list_field(default=[] )
@dataclass
class lowercase :
lowercase_ : List[int] =list_field(default=[] )
lowercase_ : List[int] =list_field(default=[1, 2, 3] )
lowercase_ : List[str] =list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowercase_ : List[float] =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase :
lowercase_ : List[int] =field()
lowercase_ : str =field()
lowercase_ : BasicEnum =field()
def A__ ( self):
lowercase = BasicEnum(self.required_enum)
@dataclass
class lowercase :
lowercase_ : int
lowercase_ : "BasicEnum" =field()
lowercase_ : "Optional[bool]" =None
lowercase_ : "str" =field(default='''toto''' , metadata={'''help''': '''help message'''} )
lowercase_ : "List[str]" =list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase :
lowercase_ : bool =False
lowercase_ : bool =True
lowercase_ : bool | None =None
@dataclass
class lowercase :
lowercase_ : int | None =None
lowercase_ : float | None =field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''help message'''} )
lowercase_ : str | None =None
lowercase_ : list[str] | None =list_field(default=[] )
lowercase_ : list[int] | None =list_field(default=[] )
class lowercase ( unittest.TestCase ):
def A__ ( self ,A__ ,A__):
self.assertEqual(len(a._actions) ,len(b._actions))
for x, y in zip(a._actions ,b._actions):
lowercase = {k: v for k, v in vars(A__).items() if k != '''container'''}
lowercase = {k: v for k, v in vars(A__).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' ,A__) and yy.get('''choices''' ,A__):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](A__) ,yy['''type'''](A__))
del xx["type"], yy["type"]
self.assertEqual(A__ ,A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument('''--foo''' ,type=A__ ,required=A__)
expected.add_argument('''--bar''' ,type=A__ ,required=A__)
expected.add_argument('''--baz''' ,type=A__ ,required=A__)
expected.add_argument('''--flag''' ,type=A__ ,default=A__ ,const=A__ ,nargs='''?''')
self.argparsersEqual(A__ ,A__)
lowercase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((lowercase) , ) = parser.parse_args_into_dataclasses(A__ ,look_for_args_file=A__)
self.assertFalse(example.flag)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument('''--foo''' ,default=4_2 ,type=A__)
expected.add_argument('''--baz''' ,default='''toto''' ,type=A__ ,help='''help message''')
self.argparsersEqual(A__ ,A__)
def A__ ( self):
lowercase = argparse.ArgumentParser()
expected.add_argument('''--foo''' ,type=A__ ,default=A__ ,const=A__ ,nargs='''?''')
expected.add_argument('''--baz''' ,type=A__ ,default=A__ ,const=A__ ,nargs='''?''')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' ,action='''store_false''' ,default=A__ ,dest='''baz''')
expected.add_argument('''--opt''' ,type=A__ ,default=A__)
lowercase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A__)
for dataclass_type in dataclass_types:
lowercase = HfArgumentParser(A__)
self.argparsersEqual(A__ ,A__)
lowercase = parser.parse_args([])
self.assertEqual(A__ ,Namespace(foo=A__ ,baz=A__ ,opt=A__))
lowercase = parser.parse_args(['''--foo''', '''--no_baz'''])
self.assertEqual(A__ ,Namespace(foo=A__ ,baz=A__ ,opt=A__))
lowercase = parser.parse_args(['''--foo''', '''--baz'''])
self.assertEqual(A__ ,Namespace(foo=A__ ,baz=A__ ,opt=A__))
lowercase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''])
self.assertEqual(A__ ,Namespace(foo=A__ ,baz=A__ ,opt=A__))
lowercase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''])
self.assertEqual(A__ ,Namespace(foo=A__ ,baz=A__ ,opt=A__))
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' ,default='''toto''' ,choices=['''titi''', '''toto''', 4_2] ,type=make_choice_type_function(['''titi''', '''toto''', 4_2]) ,)
self.argparsersEqual(A__ ,A__)
lowercase = parser.parse_args([])
self.assertEqual(args.foo ,'''toto''')
lowercase = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto)
lowercase = parser.parse_args(['''--foo''', '''titi'''])
self.assertEqual(args.foo ,'''titi''')
lowercase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''])[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi)
lowercase = parser.parse_args(['''--foo''', '''42'''])
self.assertEqual(args.foo ,4_2)
lowercase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''])[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo)
def A__ ( self):
@dataclass
class lowercase :
lowercase_ : Literal["titi", "toto", 42] ="toto"
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' ,default='''toto''' ,choices=('''titi''', '''toto''', 4_2) ,type=make_choice_type_function(['''titi''', '''toto''', 4_2]) ,)
self.argparsersEqual(A__ ,A__)
lowercase = parser.parse_args([])
self.assertEqual(args.foo ,'''toto''')
lowercase = parser.parse_args(['''--foo''', '''titi'''])
self.assertEqual(args.foo ,'''titi''')
lowercase = parser.parse_args(['''--foo''', '''42'''])
self.assertEqual(args.foo ,4_2)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' ,nargs='''+''' ,default=[] ,type=A__)
expected.add_argument('''--bar_int''' ,nargs='''+''' ,default=[1, 2, 3] ,type=A__)
expected.add_argument('''--foo_str''' ,nargs='''+''' ,default=['''Hallo''', '''Bonjour''', '''Hello'''] ,type=A__)
expected.add_argument('''--foo_float''' ,nargs='''+''' ,default=[0.1, 0.2, 0.3] ,type=A__)
self.argparsersEqual(A__ ,A__)
lowercase = parser.parse_args([])
self.assertEqual(
A__ ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] ,foo_float=[0.1, 0.2, 0.3]) ,)
lowercase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split())
self.assertEqual(A__ ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=['''a''', '''b''', '''c'''] ,foo_float=[0.1, 0.7]))
def A__ ( self):
lowercase = argparse.ArgumentParser()
expected.add_argument('''--foo''' ,default=A__ ,type=A__)
expected.add_argument('''--bar''' ,default=A__ ,type=A__ ,help='''help message''')
expected.add_argument('''--baz''' ,default=A__ ,type=A__)
expected.add_argument('''--ces''' ,nargs='''+''' ,default=[] ,type=A__)
expected.add_argument('''--des''' ,nargs='''+''' ,default=[] ,type=A__)
lowercase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A__)
for dataclass_type in dataclass_types:
lowercase = HfArgumentParser(A__)
self.argparsersEqual(A__ ,A__)
lowercase = parser.parse_args([])
self.assertEqual(A__ ,Namespace(foo=A__ ,bar=A__ ,baz=A__ ,ces=[] ,des=[]))
lowercase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split())
self.assertEqual(A__ ,Namespace(foo=1_2 ,bar=3.14 ,baz='''42''' ,ces=['''a''', '''b''', '''c'''] ,des=[1, 2, 3]))
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' ,nargs='''+''' ,type=A__ ,required=A__)
expected.add_argument('''--required_str''' ,type=A__ ,required=A__)
expected.add_argument(
'''--required_enum''' ,type=make_choice_type_function(['''titi''', '''toto''']) ,choices=['''titi''', '''toto'''] ,required=A__ ,)
self.argparsersEqual(A__ ,A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = argparse.ArgumentParser()
expected.add_argument('''--foo''' ,type=A__ ,required=A__)
expected.add_argument(
'''--required_enum''' ,type=make_choice_type_function(['''titi''', '''toto''']) ,choices=['''titi''', '''toto'''] ,required=A__ ,)
expected.add_argument('''--opt''' ,type=A__ ,default=A__)
expected.add_argument('''--baz''' ,default='''toto''' ,type=A__ ,help='''help message''')
expected.add_argument('''--foo_str''' ,nargs='''+''' ,default=['''Hallo''', '''Bonjour''', '''Hello'''] ,type=A__)
self.argparsersEqual(A__ ,A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
lowercase = parser.parse_dict(A__)[0]
lowercase = BasicExample(**A__)
self.assertEqual(A__ ,A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(A__ ,parser.parse_dict ,A__ ,allow_extra_keys=A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(A__ ,'''temp_json''')
os.mkdir(A__)
with open(temp_local_path + '''.json''' ,'''w+''') as f:
json.dump(A__ ,A__)
            lowercase = parser.parse_json_file(Path(temp_local_path + '''.json'''))[0]
lowercase = BasicExample(**A__)
self.assertEqual(A__ ,A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
lowercase = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(A__ ,'''temp_yaml''')
os.mkdir(A__)
with open(temp_local_path + '''.yaml''' ,'''w+''') as f:
yaml.dump(A__ ,A__)
lowercase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml'''))[0]
lowercase = BasicExample(**A__)
self.assertEqual(A__ ,A__)
def A__ ( self):
lowercase = HfArgumentParser(A__)
self.assertIsNotNone(A__)
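# --- Illustration ---
# A brief usage sketch of the API under test: HfArgumentParser turns dataclass
# fields into argparse options and parses them back into dataclass instances.
# `ExampleArgs` is a hypothetical dataclass, not one of the fixtures above.
@dataclass
class ExampleArgs:
    foo: int = 42
    baz: str = "toto"

(_parsed,) = HfArgumentParser(ExampleArgs).parse_args_into_dataclasses(
    ["--foo", "7"], look_for_args_file=False
)
assert _parsed.foo == 7 and _parsed.baz == "toto"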
| 633
|
from numpy import exp, pi, sqrt
def gaussian(x , mu: float = 0.0 , sigma: float = 1.0 ):
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
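# Numeric sanity check: the standard normal density at its mean is
# 1 / sqrt(2 * pi) ≈ 0.3989.
assert abs(gaussian(0) - 0.3989) < 1e-3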
| 633
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE__ = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class _UpperCAmelCase ( lowercase ):
lowerCamelCase_ : List[Any] = """albert"""
def __init__( self : int , UpperCAmelCase : Tuple=3_00_00 , UpperCAmelCase : int=1_28 , UpperCAmelCase : Optional[Any]=40_96 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Union[str, Any]=64 , UpperCAmelCase : str=1_63_84 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Dict="gelu_new" , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : Dict=0 , UpperCAmelCase : int=5_12 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : int=1E-12 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Tuple="absolute" , UpperCAmelCase : Any=0 , UpperCAmelCase : int=2 , UpperCAmelCase : List[Any]=3 , **UpperCAmelCase : Optional[int] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ :str = embedding_size
SCREAMING_SNAKE_CASE_ :List[str] = hidden_size
SCREAMING_SNAKE_CASE_ :Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ :Optional[Any] = num_hidden_groups
SCREAMING_SNAKE_CASE_ :Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ :str = inner_group_num
SCREAMING_SNAKE_CASE_ :List[str] = hidden_act
SCREAMING_SNAKE_CASE_ :Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ :Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ :Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ :Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ :List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ :List[str] = initializer_range
SCREAMING_SNAKE_CASE_ :List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ :List[str] = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ :Union[str, Any] = position_embedding_type
class _UpperCAmelCase ( lowercase ):
@property
def _snake_case ( self : List[Any]):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ :Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ :Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
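# --- Illustration ---
# A hedged usage sketch (class names follow upstream transformers; the dump
# above renames them): instantiate the config and inspect the dynamic axes the
# ONNX export would declare.
# from transformers.models.albert.configuration_albert import AlbertConfig, AlbertOnnxConfig
# onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
# print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])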
| 631
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def _snake_case ( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare( self , dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            self.config_name = "bleurt-base-128"
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name))
    def _compute( self , predictions , references):
        scores = self.scorer.score(references=references , candidates=predictions)
        return {"scores": scores}
| 631
| 1
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()
    with open(args.src_path, 'r' ) as src_file, open(args.evaluation_set, 'w' ) as eval_file, open(
        args.gold_data_path, 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts ) + '\n' )
if __name__ == "__main__":
main()
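# --- Illustration ---
# A tiny, hypothetical record showing the two output lines the loop above
# produces per DPR entry: the question goes to the evaluation set, the
# tab-joined positive-context titles go to the gold file.
record = {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}
eval_line = record["question"] + "\n"
gold_line = "\t".join(context["title"] for context in record["positive_ctxs"]) + "\n"
assert gold_line == "Hamlet\tWilliam Shakespeare\n"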
| 715
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
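# --- Illustration ---
# The module above defers heavy imports via _LazyModule. A minimal sketch of
# the same idea with importlib (not the transformers helper itself):
import importlib

class _LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        # runs only for attributes not found normally; loads the module once
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

assert _LazyAttr("math").sqrt(4.0) == 2.0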
| 406
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _a ( self ):
UpperCamelCase_: Union[str, Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: List[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase_: Any = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
UpperCamelCase_: List[str] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Dict = self.get_image_processor()
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: Tuple = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase_: Optional[int] = image_processor(_lowerCamelCase , return_tensors='np' )
UpperCamelCase_: Optional[Any] = processor(images=_lowerCamelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = self.get_image_processor()
UpperCamelCase_: Tuple = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCamelCase_: Any = 'lower newer'
UpperCamelCase_: List[str] = processor(text=_lowerCamelCase )
UpperCamelCase_: int = tokenizer(_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ):
UpperCamelCase_: Optional[int] = self.get_image_processor()
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: Optional[int] = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCamelCase_: Optional[int] = 'lower newer'
UpperCamelCase_: int = self.prepare_image_inputs()
UpperCamelCase_: List[str] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self ):
UpperCamelCase_: Optional[Any] = self.get_image_processor()
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCamelCase_: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: List[str] = processor.batch_decode(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: List[Any] = self.get_image_processor()
UpperCamelCase_: int = self.get_tokenizer()
UpperCamelCase_: List[Any] = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCamelCase_: Tuple = 'lower newer'
UpperCamelCase_: List[str] = self.prepare_image_inputs()
UpperCamelCase_: Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
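# --- Illustration ---
# A compact end-to-end sketch of what the tests above exercise (requires
# network access for the tiny tokenizer checkpoint): a combined text+image call
# returns exactly the keys asserted in the processor tests.
def quick_blip_processor_check():
    processor = BlipProcessor(
        tokenizer=BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel'),
        image_processor=BlipImageProcessor(),
    )
    image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
    batch = processor(text='lower newer', images=image)
    assert list(batch.keys()) == ['pixel_values', 'input_ids', 'attention_mask']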
| 57
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
snake_case__ : List[Any] =AlbertModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[str] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : int =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
snake_case__ : List[Any] =AlbertForPreTraining(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] =model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , sentence_order_label=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] =AlbertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Dict =AlbertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] =model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] =self.num_labels
snake_case__ : Any =AlbertForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
snake_case__ : int =self.num_labels
snake_case__ : int =AlbertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
snake_case__ : Dict =self.num_choices
snake_case__ : Optional[Any] =AlbertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Dict =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Tuple =model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ =True
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : int =super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
snake_case__ : Dict =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__ : str =AlbertModelTester(self )
snake_case__ : Optional[Any] =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : List[Any] =type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[Any] =AlbertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> int:
"""simple docstring"""
snake_case__ : Dict =AlbertModel.from_pretrained('''albert-base-v2''' )
snake_case__ : Dict =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case__ : List[Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case__ : Union[str, Any] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
snake_case__ : Dict =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
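# A minimal, standalone sketch of the same checkpoint sanity check the slow test
# above performs. This is a hedged illustration, not part of the test suite: the
# all-ones attention mask is a simplification, so only the output shape (not the
# expected slice) is asserted here.
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    hidden_states = model(input_ids, attention_mask=torch.ones_like(input_ids))[0]
assert hidden_states.shape == torch.Size((1, 11, 768))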
| 381
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
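# Hedged usage sketch for the pipelines gated above. The hub id
# "shi-labs/versatile-diffusion" and the remove_unused_weights() call follow the
# public diffusers docs and are assumptions here; per the guard above, this only
# runs with torch installed and transformers >= 4.25.0.
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe.remove_unused_weights()
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse on mars").images[0]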
| 216
|
'''simple docstring'''
class snake_case__ :
def __init__( self : Dict , _A : int ) -> Tuple:
UpperCAmelCase_ : List[str] = n
UpperCAmelCase_ : Optional[Any] = [None] * self.n
UpperCAmelCase_ : List[str] = 0 # index of the first element
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : List[Any] = 0
def __len__( self : Optional[int] ) -> int:
return self.size
def A ( self : List[Any] ) -> bool:
return self.size == 0
def A ( self : str ) -> Dict:
return False if self.is_empty() else self.array[self.front]
def A ( self : Any , _A : int ) -> List[str]:
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
UpperCAmelCase_ : Dict = data
UpperCAmelCase_ : List[str] = (self.rear + 1) % self.n
self.size += 1
return self
def A ( self : Optional[int] ) -> str:
if self.size == 0:
raise Exception('''UNDERFLOW''' )
UpperCAmelCase_ : Dict = self.array[self.front]
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = (self.front + 1) % self.n
self.size -= 1
return temp
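# Usage sketch for the ring buffer above. The obfuscation collapsed every method
# name to ``A`` and broke the parameter references, so the calls below assume the
# intended names the method bodies suggest (enqueue/dequeue, a class name of
# CircularQueue); treat this as illustrative only.
queue = CircularQueue(3)  # hypothetical readable name for the class above
queue.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
assert len(queue) == 3
assert queue.dequeue() == 10  # FIFO: front advances modulo n, size shrinks
assert queue.dequeue() == 20
assert len(queue) == 1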
| 216
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def a (_lowerCAmelCase , _lowerCAmelCase=False ):
SCREAMING_SNAKE_CASE_ = OmegaConf.load(SCREAMING_SNAKE_CASE_ )
if display:
print(yaml.dump(OmegaConf.to_container(SCREAMING_SNAKE_CASE_ ) ) )
return config
def a (_lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ):
if conf_path is None:
SCREAMING_SNAKE_CASE_ = '''./model_checkpoints/vqgan_only.yaml'''
SCREAMING_SNAKE_CASE_ = load_config(SCREAMING_SNAKE_CASE_ , display=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VQModel(**config.model.params )
if ckpt_path is None:
SCREAMING_SNAKE_CASE_ = '''./model_checkpoints/vqgan_only.pt'''
SCREAMING_SNAKE_CASE_ = torch.load(SCREAMING_SNAKE_CASE_ , map_location=SCREAMING_SNAKE_CASE_ )
if ".ckpt" in ckpt_path:
SCREAMING_SNAKE_CASE_ = sd['''state_dict''']
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
del sd
return model
def a (_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model.encode(SCREAMING_SNAKE_CASE_ )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
SCREAMING_SNAKE_CASE_ = model.decode(SCREAMING_SNAKE_CASE_ )
return xrec
def a (_lowerCAmelCase , _lowerCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = string.rsplit('''.''' , 1 )
if reload:
SCREAMING_SNAKE_CASE_ = importlib.import_module(SCREAMING_SNAKE_CASE_ )
importlib.reload(SCREAMING_SNAKE_CASE_ )
return getattr(importlib.import_module(SCREAMING_SNAKE_CASE_ , package=SCREAMING_SNAKE_CASE_ ) , cls )
def a (_lowerCAmelCase ):
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True , _lowerCAmelCase=True ):
SCREAMING_SNAKE_CASE_ = instantiate_from_config(SCREAMING_SNAKE_CASE_ )
if sd is not None:
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if ckpt:
SCREAMING_SNAKE_CASE_ = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE_ = pl_sd['''global_step''']
print(F"loaded model from global step {global_step}." )
else:
SCREAMING_SNAKE_CASE_ = {'''state_dict''': None}
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=SCREAMING_SNAKE_CASE_ , eval_mode=SCREAMING_SNAKE_CASE_ )['''model''']
return model, global_step
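# Hedged end-to-end sketch of the helpers above: load a VQGAN from the default
# config/checkpoint pair and reconstruct a batch. The function names used here
# (load_vqgan, reconstruct_with_vqgan) are the intended de-obfuscated ones, and
# the checkpoint paths and 256x256 input size are assumptions.
import torch

device = torch.device("cpu")
vqgan = load_vqgan(device)  # falls back to ./model_checkpoints/vqgan_only.{yaml,pt}
batch = torch.randn(1, 3, 256, 256)  # stand-in for a real preprocessed image batch
with torch.no_grad():
    reconstruction = reconstruct_with_vqgan(batch, vqgan)
print(reconstruction.shape)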
| 234
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( snake_case ):
UpperCamelCase_ :Dict = (KDPMaDiscreteScheduler,)
UpperCamelCase_ :str = 1_0
def UpperCAmelCase_ ( self , **_lowercase )-> str:
UpperCamelCase_ = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**_lowercase )
return config
def UpperCAmelCase_ ( self )-> Union[str, Any]:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def UpperCAmelCase_ ( self )-> int:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def UpperCAmelCase_ ( self )-> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowercase )
def UpperCAmelCase_ ( self )-> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCamelCase_ = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase_ = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = scheduler.scale_model_input(_lowercase , _lowercase )
UpperCamelCase_ = model(_lowercase , _lowercase )
UpperCamelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase )
UpperCamelCase_ = output.prev_sample
UpperCamelCase_ = torch.sum(torch.abs(_lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def UpperCAmelCase_ ( self )-> Dict:
if torch_device == "mps":
return
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase_ = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = scheduler.scale_model_input(_lowercase , _lowercase )
UpperCamelCase_ = model(_lowercase , _lowercase )
UpperCamelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase )
UpperCamelCase_ = output.prev_sample
UpperCamelCase_ = torch.sum(torch.abs(_lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def UpperCAmelCase_ ( self )-> Optional[int]:
if torch_device == "mps":
return
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter.to(_lowercase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCamelCase_ = scheduler.scale_model_input(_lowercase , _lowercase )
UpperCamelCase_ = model(_lowercase , _lowercase )
UpperCamelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase )
UpperCamelCase_ = output.prev_sample
UpperCamelCase_ = torch.sum(torch.abs(_lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(_lowercase ) )
if str(_lowercase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
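# The three full-loop tests above all walk the standard diffusers scheduler
# protocol. A minimal, model-free sketch of that loop (in released diffusers the
# class is exported as KDPM2DiscreteScheduler; random noise stands in for a UNet):
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # a trained model would go here
    sample = scheduler.step(noise_pred, t, sample).prev_sample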
| 628
| 0
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
SCREAMING_SNAKE_CASE__ = False
try:
SCREAMING_SNAKE_CASE__ = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _UpperCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : str = None , UpperCAmelCase : list = []):
SCREAMING_SNAKE_CASE_ :List[str] = 0
SCREAMING_SNAKE_CASE_ :int = choices
SCREAMING_SNAKE_CASE_ :Optional[int] = prompt
if sys.platform == "win32":
SCREAMING_SNAKE_CASE_ :str = "*"
else:
SCREAMING_SNAKE_CASE_ :Optional[Any] = "➔ "
def _snake_case ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str = ""):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCAmelCase)
else:
forceWrite(self.choices[index] , UpperCAmelCase)
def _snake_case ( self : List[str] , UpperCAmelCase : int):
if index == self.position:
forceWrite(F" {self.arrow_char} ")
self.write_choice(UpperCAmelCase)
else:
forceWrite(F" {self.choices[index]}")
reset_cursor()
def _snake_case ( self : List[str] , UpperCAmelCase : Direction , UpperCAmelCase : int = 1):
SCREAMING_SNAKE_CASE_ :Dict = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCAmelCase)
move_cursor(UpperCAmelCase , direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def _snake_case ( self : Dict):
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def _snake_case ( self : Optional[int]):
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def _snake_case ( self : List[str]):
move_cursor(len(self.choices) - self.position , "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def _snake_case ( self : List[Any]):
move_cursor(len(self.choices) - self.position , "DOWN")
raise KeyboardInterrupt
 @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def _snake_case ( self : List[str]):
SCREAMING_SNAKE_CASE_ :int = int(chr(self.current_selection))
SCREAMING_SNAKE_CASE_ :Optional[Any] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP , -movement)
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCAmelCase)
else:
return
else:
return
def _snake_case ( self : Optional[Any] , UpperCAmelCase : int = 0):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n")
SCREAMING_SNAKE_CASE_ :str = default_choice
for i in range(len(self.choices)):
self.print_choice(UpperCAmelCase)
forceWrite("\n")
move_cursor(len(self.choices) - self.position , "UP")
with cursor.hide():
while True:
if in_colab:
try:
SCREAMING_SNAKE_CASE_ :Any = int(builtins.input())
except ValueError:
SCREAMING_SNAKE_CASE_ :List[str] = default_choice
else:
SCREAMING_SNAKE_CASE_ :Optional[int] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1 , "UP")
clear_line()
self.write_choice(UpperCAmelCase , "\n")
return choice
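# Hedged usage sketch for the interactive menu above (in accelerate the class is
# named BulletMenu and the final method is run(); both names are assumptions here
# since the definition is obfuscated). In a real terminal this renders the
# choices and returns the selected index.
menu = BulletMenu("Which framework?", ["pytorch", "tensorflow", "flax"])
selected = menu.run(default_choice=0)
print(f"selected index: {selected}")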
| 140
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase ( a , a , a = 1 / sqrt(2 ) ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :List[str] = sin(a )
SCREAMING_SNAKE_CASE_ :Tuple = cos(a )
SCREAMING_SNAKE_CASE_ :Dict = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :Optional[int] = (1 - _cos) / 2
SCREAMING_SNAKE_CASE_ :Dict = 1 - _cos
SCREAMING_SNAKE_CASE_ :Tuple = 1 + alpha
SCREAMING_SNAKE_CASE_ :Optional[Any] = -2 * _cos
SCREAMING_SNAKE_CASE_ :Dict = 1 - alpha
SCREAMING_SNAKE_CASE_ :Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowercase ( a , a , a = 1 / sqrt(2 ) ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :Optional[int] = sin(a )
SCREAMING_SNAKE_CASE_ :int = cos(a )
SCREAMING_SNAKE_CASE_ :str = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :Optional[int] = (1 + _cos) / 2
SCREAMING_SNAKE_CASE_ :int = -1 - _cos
SCREAMING_SNAKE_CASE_ :Any = 1 + alpha
SCREAMING_SNAKE_CASE_ :Any = -2 * _cos
SCREAMING_SNAKE_CASE_ :Optional[Any] = 1 - alpha
SCREAMING_SNAKE_CASE_ :Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowercase ( a , a , a = 1 / sqrt(2 ) ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :str = sin(a )
SCREAMING_SNAKE_CASE_ :Optional[int] = cos(a )
SCREAMING_SNAKE_CASE_ :Optional[Any] = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :Any = _sin / 2
SCREAMING_SNAKE_CASE_ :Optional[int] = 0
SCREAMING_SNAKE_CASE_ :str = -ba
SCREAMING_SNAKE_CASE_ :str = 1 + alpha
SCREAMING_SNAKE_CASE_ :Union[str, Any] = -2 * _cos
SCREAMING_SNAKE_CASE_ :Tuple = 1 - alpha
SCREAMING_SNAKE_CASE_ :Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowercase ( a , a , a = 1 / sqrt(2 ) ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :Optional[Any] = sin(a )
SCREAMING_SNAKE_CASE_ :str = cos(a )
SCREAMING_SNAKE_CASE_ :Any = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :Optional[Any] = 1 - alpha
SCREAMING_SNAKE_CASE_ :int = -2 * _cos
SCREAMING_SNAKE_CASE_ :Tuple = 1 + alpha
SCREAMING_SNAKE_CASE_ :Any = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowercase ( a , a , a , a = 1 / sqrt(2 ) , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :Any = sin(a )
SCREAMING_SNAKE_CASE_ :Any = cos(a )
SCREAMING_SNAKE_CASE_ :List[str] = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :str = 10 ** (gain_db / 40)
SCREAMING_SNAKE_CASE_ :str = 1 + alpha * big_a
SCREAMING_SNAKE_CASE_ :int = -2 * _cos
SCREAMING_SNAKE_CASE_ :List[Any] = 1 - alpha * big_a
SCREAMING_SNAKE_CASE_ :Optional[Any] = 1 + alpha / big_a
SCREAMING_SNAKE_CASE_ :Optional[Any] = -2 * _cos
SCREAMING_SNAKE_CASE_ :Any = 1 - alpha / big_a
SCREAMING_SNAKE_CASE_ :Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowercase ( a , a , a , a = 1 / sqrt(2 ) , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :Optional[int] = sin(a )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = cos(a )
SCREAMING_SNAKE_CASE_ :str = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 10 ** (gain_db / 40)
SCREAMING_SNAKE_CASE_ :Optional[int] = (big_a + 1) - (big_a - 1) * _cos
SCREAMING_SNAKE_CASE_ :Tuple = (big_a + 1) + (big_a - 1) * _cos
SCREAMING_SNAKE_CASE_ :List[Any] = (big_a - 1) - (big_a + 1) * _cos
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos
SCREAMING_SNAKE_CASE_ :Optional[Any] = 2 * sqrt(a ) * alpha
SCREAMING_SNAKE_CASE_ :Optional[Any] = big_a * (pmc + aaa)
SCREAMING_SNAKE_CASE_ :str = 2 * big_a * mpc
SCREAMING_SNAKE_CASE_ :List[Any] = big_a * (pmc - aaa)
SCREAMING_SNAKE_CASE_ :Optional[int] = ppmc + aaa
SCREAMING_SNAKE_CASE_ :Dict = -2 * pmpc
SCREAMING_SNAKE_CASE_ :str = ppmc - aaa
SCREAMING_SNAKE_CASE_ :Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowercase ( a , a , a , a = 1 / sqrt(2 ) , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[Any] = tau * frequency / samplerate
SCREAMING_SNAKE_CASE_ :Any = sin(a )
SCREAMING_SNAKE_CASE_ :Tuple = cos(a )
SCREAMING_SNAKE_CASE_ :Tuple = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE_ :Any = 10 ** (gain_db / 40)
SCREAMING_SNAKE_CASE_ :Dict = (big_a + 1) - (big_a - 1) * _cos
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (big_a + 1) + (big_a - 1) * _cos
SCREAMING_SNAKE_CASE_ :List[str] = (big_a - 1) - (big_a + 1) * _cos
SCREAMING_SNAKE_CASE_ :Tuple = (big_a - 1) + (big_a + 1) * _cos
SCREAMING_SNAKE_CASE_ :Dict = 2 * sqrt(a ) * alpha
SCREAMING_SNAKE_CASE_ :int = big_a * (ppmc + aaa)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = -2 * big_a * pmpc
SCREAMING_SNAKE_CASE_ :Any = big_a * (ppmc - aaa)
SCREAMING_SNAKE_CASE_ :List[str] = pmc + aaa
SCREAMING_SNAKE_CASE_ :Optional[int] = 2 * mpc
SCREAMING_SNAKE_CASE_ :Union[str, Any] = pmc - aaa
SCREAMING_SNAKE_CASE_ :Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
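# Hedged usage sketch: build one biquad from the factories above and stream
# samples through it. make_lowpass is the intended de-obfuscated name of the
# first factory, and IIRFilter.process(sample) is assumed from the imported
# audio_filters.iir_filter module; the rate and cutoff are illustrative.
sample_rate = 48_000
lowpass = make_lowpass(1_000, sample_rate)
filtered = [lowpass.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]
print(filtered)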
| 140
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( A_ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__UpperCamelCase : Union[str, Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __SCREAMING_SNAKE_CASE ( A_ ):
if not isinstance(A_ , int ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
lowerCAmelCase__ : Optional[Any] = []
for num in range(len(odd_composites ) ):
lowerCAmelCase__ : Union[str, Any] = 0
while 2 * i * i <= odd_composites[num]:
lowerCAmelCase__ : Union[str, Any] = odd_composites[num] - 2 * i * i
if is_prime(A_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A_ ) == n:
return list_nums
return []
def __SCREAMING_SNAKE_CASE ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
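# Worked check for the search above (Project Euler 46, Goldbach's other
# conjecture), using the name the __main__ wrapper already calls: 5777 is the
# smallest odd composite that is not a prime plus twice a square.
assert compute_nums(1) == [5777]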
| 450
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : List[str] = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
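# A small sketch of what the _LazyModule registration above buys: importing the
# package only records the structure, and a submodule is actually imported the
# first time one of its attributes is touched. (Other transformers imports may
# already have loaded the submodule, so the first print is only typically False.)
import importlib
import sys

xglm = importlib.import_module("transformers.models.xglm")
print("transformers.models.xglm.configuration_xglm" in sys.modules)  # typically False
_ = xglm.XGLMConfig  # attribute access triggers the real import
print("transformers.models.xglm.configuration_xglm" in sys.modules)  # True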
| 450
| 1
|
"""simple docstring"""
from math import factorial
lowercase_ = {str(d): factorial(d) for d in range(10)}
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(__UpperCamelCase ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__A = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __UpperCamelCase ) if sum_of_digit_factorial(__UpperCamelCase ) == i )
if __name__ == "__main__":
print(F'''{solution() = }''')
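# Quick sanity check for the digit-factorial sum above, using the helper name the
# solver itself calls: 145 is one of the two curious numbers, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145 (the other is 40585).
assert sum_of_digit_factorial(145) == 145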
| 712
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : Any=3, _lowerCamelCase : List[Any]=18, _lowerCamelCase : str=30, _lowerCamelCase : List[Any]=4_00, _lowerCamelCase : List[str]=True, _lowerCamelCase : List[Any]=None, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : List[str]=False, _lowerCamelCase : str=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], _lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5], ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size if size is not None else {'''height''': 18, '''width''': 20}
__A = do_thumbnail
__A = do_align_axis
__A = do_pad
__A = do_normalize
__A = image_mean
__A = image_std
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = DonutImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = DonutImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''do_thumbnail''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''do_pad''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 20} )
__A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__A = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) )
self.assertEqual(image_processor.size, {'''height''': 84, '''width''': 42} )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
pass
@is_flaky()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
@is_flaky()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
@is_flaky()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
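# Hedged usage sketch of the processor these tests exercise, outside the test
# harness. The 18x20 target size mirrors the tester defaults above; the expected
# output shape assumes the default thumbnail-then-pad behaviour.
import numpy as np
from PIL import Image
from transformers import DonutImageProcessor

processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = Image.fromarray(np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])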
| 215
| 0
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowercase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def A ():
_lowerCAmelCase = Github(os.environ["""GITHUB_TOKEN"""] )
_lowerCAmelCase = g.get_repo("""huggingface/diffusers""" )
_lowerCAmelCase = repo.get_issues(state="""open""" )
for issue in open_issues:
_lowerCAmelCase = sorted(issue.get_comments() , key=lambda __lowerCamelCase : i.created_at , reverse=__lowerCamelCase )
_lowerCAmelCase = comments[0] if len(__lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5
|
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def A__( __lowerCAmelCase = 1_00 ):
_snake_case : Any = 1
_snake_case : Optional[int] = 2
for i in range(2 , max_n + 1 ):
_snake_case : Union[str, Any] = pre_numerator
_snake_case : Optional[Any] = 2 * i // 3 if i % 3 == 0 else 1
_snake_case : Dict = cur_numerator
_snake_case : str = e_cont * pre_numerator + temp
return sum_digits(__lowerCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
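# Worked check (Project Euler 65), using the name the __main__ print already
# uses: the 10th convergent of e is 1457/536, so the digit sum of the numerator
# is 1 + 4 + 5 + 7 = 17 (the intended return value is sum_digits(cur_numerator)).
assert solution(10) == 17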
| 304
| 0
|
_SCREAMING_SNAKE_CASE = {}
def _snake_case (_snake_case : int , _snake_case : int , _snake_case : int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_lowercase =(days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_lowercase =_calculate(days - 1 , _snake_case , late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_lowercase =_calculate(days - 1 , absent + 1 , 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_lowercase =_calculate(days - 1 , _snake_case , 0)
_lowercase =state_late + state_absent + state_ontime
_lowercase =prizestrings
return prizestrings
def _snake_case (_snake_case : int = 30) -> int:
return _calculate(_snake_case , absent=0 , late=0)
if __name__ == "__main__":
print(solution())
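# Worked check for the memoised recursion above, via the name it calls itself by:
# the Project Euler 191 statement notes that exactly 43 of the 81 possible 4-day
# trinary strings earn a prize.
assert _calculate(4, 0, 0) == 43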
| 557
|
def _snake_case (_snake_case : int) -> bool:
if p < 2:
raise ValueError('p should not be less than 2!')
elif p == 2:
return True
_lowercase =4
_lowercase =(1 << p) - 1
for _ in range(p - 2):
_lowercase =((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
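# The two prints above are the classic examples: 2**7 - 1 = 127 is prime, so the
# test returns True, while 2**11 - 1 = 2047 = 23 * 89, so it returns False. A
# direct cross-check against sympy (assumed installed) for small odd exponents:
from sympy import isprime

for p in (3, 5, 7, 11, 13):
    assert lucas_lehmer_test(p) == isprime(2**p - 1)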
| 557
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A_ :
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = None
_lowerCAmelCase : List[str] = namedtuple('''CoinsDistribResult''', '''moves excess''')
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_lowerCamelCase ) != count_coins(_lowerCamelCase ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_lowerCamelCase, _lowerCamelCase = get_distrib(node.left )
_lowerCamelCase, _lowerCamelCase = get_distrib(node.right )
_lowerCamelCase : Any = 1 - left_distrib_excess
_lowerCamelCase : Dict = 1 - right_distrib_excess
_lowerCamelCase : Union[str, Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(_lowerCamelCase )
+ abs(_lowerCamelCase )
)
_lowerCamelCase : str = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_lowerCamelCase , _lowerCamelCase )
return get_distrib(_lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
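# Hedged usage sketch. The dataclass above is an obfuscated tree node whose
# traversals read node.data / node.left / node.right, so this assumes those
# parameter names are restored inside the helpers. For the classic tree
# [0, 3, 0], three moves are needed: two coins leave the left child and one of
# them continues on to the right child.
from dataclasses import dataclass

@dataclass
class TreeNode:  # assumed shape of the obfuscated dataclass above
    data: int
    left: "TreeNode | None" = None
    right: "TreeNode | None" = None

root = TreeNode(0, TreeNode(3), TreeNode(0))
print(lowerCamelCase_(root))  # 3, calling the outer function by its name above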
| 46
|
"""simple docstring"""
import re
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError("Invalid Strand" )
return _lowerCamelCase.translate(str.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
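# Quick check of the complement above, calling the function by the name it is
# defined under: A<->T and C<->G, so "ATCG" maps to "TAGC", and any strand with
# characters outside ACGT raises ValueError.
assert lowerCamelCase_("ATCG") == "TAGC"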
| 46
| 1
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ):
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(lowerCAmelCase ):
print(f'''{i}\t\t{d}''' )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
for j in range(lowerCAmelCase ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = [float('''inf''' )] * vertex_count
_UpperCamelCase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCAmelCase ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
_UpperCamelCase = distance[u] + w
_UpperCamelCase = check_negative_cycle(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Optional[Any] = int(input("""Enter number of vertices: """).strip())
lowercase : List[str] = int(input("""Enter number of edges: """).strip())
lowercase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
lowercase ,lowercase ,lowercase = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
lowercase : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
lowercase : Optional[int] = int(input("""\nEnter shortest path source:""").strip())
lowercase : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
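# Non-interactive usage sketch of bellman_ford above (the __main__ block builds
# the edge list from stdin), assuming the de-obfuscated signature
# (graph, vertex_count, edge_count, src). The direct edge 0 -> 1 costs 4 but is
# beaten by the two-hop path 0 -> 2 -> 1 costing 3.
edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(edges, 3, len(edges), 0))  # expected: [0.0, 3, 1]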
| 105
|
from __future__ import annotations
lowercase : int = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class __A:
def __init__( self, A, A ):
"""simple docstring"""
_UpperCamelCase = graph
# mapping node to its parent in resulting breadth first tree
_UpperCamelCase = {}
_UpperCamelCase = source_vertex
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = {self.source_vertex}
_UpperCamelCase = None
_UpperCamelCase = [self.source_vertex] # first in first out queue
while queue:
_UpperCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(A )
_UpperCamelCase = vertex
queue.append(A )
def _UpperCamelCase ( self, A ):
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCamelCase = self.parent.get(A )
if target_vertex_parent is None:
_UpperCamelCase = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(A )
return self.shortest_path(A ) + F'''->{target_vertex}'''
if __name__ == "__main__":
lowercase : Tuple = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 105
| 1
|
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_0_0, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 695
|
"""simple docstring"""
from typing import Any
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : dict , lowerCAmelCase__ : dict , lowerCAmelCase__ : dict , ) -> list:
_validation(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Creates data structures and fill initial step
__a = {}
__a = {}
for state in states_space:
__a = observations_space[0]
__a = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__a = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCAmelCase__ ) ):
__a = observations_space[o]
__a = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__a = ''''''
__a = -1
for k_state in states_space:
__a = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__a = probability
__a = k_state
# Update probabilities and pointers dicts
__a = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__a = arg_max
# The final observation
__a = observations_space[len(lowerCAmelCase__ ) - 1]
# argmax for given final observation
__a = ''''''
__a = -1
for k_state in states_space:
__a = probabilities[(k_state, final_observation)]
if probability > max_probability:
__a = probability
__a = k_state
__a = arg_max
# Process pointers backwards
__a = last_state
__a = []
for o in range(len(lowerCAmelCase__ ) - 1 , -1 , -1 ):
result.append(lowerCAmelCase__ )
__a = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
_validate_not_empty(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
_validate_lists(lowerCAmelCase__ , lowerCAmelCase__ )
_validate_dicts(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> None:
_validate_list(lowerCAmelCase__ , '''observations_space''' )
_validate_list(lowerCAmelCase__ , '''states_space''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
__a = f'''{var_name} must be a list'''
raise ValueError(lowerCAmelCase__ )
else:
for x in _object:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''{var_name} must be a list of strings'''
raise ValueError(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
_validate_dict(lowerCAmelCase__ , '''initial_probabilities''' , lowerCAmelCase__ )
_validate_nested_dict(lowerCAmelCase__ , '''transition_probabilities''' )
_validate_nested_dict(lowerCAmelCase__ , '''emission_probabilities''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> None:
_validate_dict(_object , lowerCAmelCase__ , lowerCAmelCase__ )
for x in _object.values():
_validate_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : type , lowerCAmelCase__ : bool = False ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
__a = f'''{var_name} must be a dict'''
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object ):
__a = f'''{var_name} all keys must be strings'''
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object.values() ):
__a = '''nested dictionary ''' if nested else ''''''
__a = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
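# Hedged usage sketch of the Viterbi implementation above, assuming the top-level
# decoder keeps its intended name viterbi and de-obfuscated parameter names. This
# is the textbook Healthy/Fever HMM; the expected decoded path is
# ['Healthy', 'Healthy', 'Fever'].
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))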
| 695
| 1
|
def __snake_case ( _UpperCAmelCase ):
__a = [1]
__a , __a , __a = 0, 0, 0
__a = ugly_nums[ia] * 2
__a = ugly_nums[ia] * 3
__a = ugly_nums[ia] * 5
for _ in range(1 , _UpperCAmelCase ):
__a = min(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
ugly_nums.append(_UpperCAmelCase )
if next_num == next_a:
ia += 1
__a = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__a = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__a = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(200) = }')
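# Worked check, using the name the __main__ print already uses: the sequence of
# numbers whose only prime factors are 2, 3 and 5 starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
assert ugly_numbers(10) == 12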
| 60
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case :int = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _A ( tr.AbstractTransform ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
'''simple docstring'''
__a = sentence_delimiter
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return list(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = []
for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
chars.append(self.sentence_delimiter)
return chars
__snake_case :Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case :Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
'''
__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]
__a = 0
__a = 0
for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 60
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
snake_case__ : Tuple = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 23
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
def lowercase__ ( _UpperCamelCase) -> Dict:
"""simple docstring"""
UpperCamelCase = r'\w+[.]\d+'
UpperCamelCase = re.findall(_UpperCamelCase , _UpperCamelCase)
for pat in pats:
UpperCamelCase = key.replace(_UpperCamelCase , '_'.join(pat.split('.')))
return key
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> Tuple:
"""simple docstring"""
UpperCamelCase = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key)
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCamelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCamelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCamelCase = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0)
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
UpperCamelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=42) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCamelCase = flax_model.init_weights(PRNGKey(_UpperCamelCase))
UpperCamelCase = flatten_dict(_UpperCamelCase)
UpperCamelCase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase = rename_key(_UpperCamelCase)
UpperCamelCase = tuple(renamed_pt_key.split('.'))
# Correctly rename weight parameters
UpperCamelCase , UpperCamelCase = rename_key_and_reshape_tensor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
# also add unexpected weight so that warning is thrown
UpperCamelCase = jnp.asarray(_UpperCamelCase)
return unflatten_dict(_UpperCamelCase)
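# Small worked example of the key handling above, assuming the intended function
# names rename_key and rename_key_and_reshape_tensor: "layer.N" segments are
# flattened to "layer_N", and a 2-D PyTorch Linear weight stored as (out, in) is
# meant to be transposed into a Flax kernel of shape (in, out).
import numpy as np

print(rename_key("layers.0.attention.weight"))  # layers_0.attention.weight
flax_key, kernel = rename_key_and_reshape_tensor(("dense", "weight"), np.zeros((4, 8)), {})
print(flax_key, kernel.shape)  # ('dense', 'kernel') (8, 4)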
| 280
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a SEW-D model."""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
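# Hedged usage sketch (added, not part of the original module; it assumes the
# transformers package context needed by the relative imports above):
#
#     config = SEWDConfig()
#     # product of the conv strides 5*2*1*2*1*2*1*2*1*2*1*2*1 == 320
#     assert config.inputs_to_logits_ratio == 320
#     assert config.num_feat_extract_layers == len(config.conv_dim) == 13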
| 623
|
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
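    # Added sanity check (illustrative, not in the original file): the symmetric
    # matrix [[2, 1], [1, 2]] has eigenvalues 3 and 1, so power iteration
    # should recover the dominant eigenvalue 3.
    value, _ = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    assert abs(value - 3.0) <= 1e-6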
| 623
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
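    # Added sanity check (illustrative, not in the original file):
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]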
| 212
|
def solution() -> int:
    """
    Project Euler problem 9: find the product a*b*c of the Pythagorean triplet
    with a + b + c = 1000 (so c = 1000 - a - b and a^2 + b^2 = c^2).
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
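    # Added sanity check (illustrative, not in the original file): the known
    # Euler #9 triplet is (200, 375, 425), whose product is 31875000.
    assert solution() == 31875000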
| 81
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 587
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a Speech2Text model."""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
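# Hedged usage sketch (added, not part of the original module; it assumes the
# transformers package context needed by the relative imports above):
#
#     config = Speech2TextConfig()
#     # attribute_map aliases hidden_size -> d_model and
#     # num_attention_heads -> encoder_attention_heads
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 4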
| 587
| 1
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789'), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789'), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
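# Hedged usage sketch (added, not part of the test module): a minimal
# round-trip with the reader/writer under test, outside of pytest fixtures.
# The file name is illustrative.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#     ds2 = JsonDatasetReader("out.jsonl").read()
#     assert ds2.column_names == ["col_1", "col_2"]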
| 313
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    This wraps the HuggingFace CLIP processor to allow backprop through the
    image processing step; the stock processor converts to PIL images, which
    breaks gradient flow.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add the transform vector to the latent and decode it back to image space."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Initialize the latent from an image (or randomly) and optimize it towards the prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
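# Hedged usage sketch (added, not in the original file): driving the editor end
# to end. The checkpoint/config paths and prompts are placeholders.
#
#     editor = VQGAN_CLIP(vqgan_config="vqgan.yaml", vqgan_checkpoint="vqgan.ckpt", iterations=25)
#     editor.generate(
#         pos_prompts="a smiling face | bright lighting:0.5",
#         neg_prompts="blurry",
#         image_path="face.png",
#         save_intermediate=True,
#     )
#     editor.make_animation()  # stitches the saved iterations into a gif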
| 313
| 1
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_datasetdict_from_parquet_splits(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
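# Hedged usage note (added, not part of the test module): get_writer_batch_size
# returns a smaller Parquet row-group size for media-heavy features so random
# access stays cheap, and None (meaning the library default) otherwise, e.g.:
#
#     get_writer_batch_size(Features({"x": Value("int64")}))  # None -> default
#     get_writer_batch_size(Features({"image": Image()}))     # image row-group size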
| 707
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
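# Hedged usage sketch (added, not part of the original script): a typical
# launch command. The task name, model, and paths are illustrative.
#
#     python run_multiple_choice.py \
#         --task_name swag \
#         --model_name_or_path bert-base-uncased \
#         --data_dir ./data/swag \
#         --output_dir ./output \
#         --do_train --do_eval \
#         --max_seq_length 128 --learning_rate 5e-5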
| 156
| 0
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase = """examples/"""
_lowercase = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
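# Hedged usage sketch (added, not part of the original script): typical
# invocations of this release helper.
#
#     python release.py                 # pre-release: bump to the next minor version
#     python release.py --patch         # pre-release: bump the micro (patch) version
#     python release.py --post_release  # post-release: move back to a .dev0 version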
| 5
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
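# Hedged usage note (added, not part of the original module): with the
# lazy-module pattern above, heavy submodules are only imported on first
# attribute access, e.g.:
#
#     from transformers.models.blip import BlipConfig  # cheap, config only
#     from transformers.models.blip import BlipModel   # triggers the torch-backed import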
| 191
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class A ( _lowercase ):
'''simple docstring'''
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = SqueezeBertTokenizer
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> str:
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , )
__UpperCamelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , A_ ) != do_lower_case
or normalizer_state.get("strip_accents" , A_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , A_ ) != tokenize_chinese_chars
):
__UpperCamelCase : Any = getattr(A_ , normalizer_state.pop("type" ) )
__UpperCamelCase : List[str] = do_lower_case
__UpperCamelCase : Any = strip_accents
__UpperCamelCase : List[str] = tokenize_chinese_chars
__UpperCamelCase : str = normalizer_class(**A_ )
__UpperCamelCase : List[str] = do_lower_case
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Any:
__UpperCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
__UpperCamelCase : List[str] = [self.sep_token_id]
__UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
__UpperCamelCase : Tuple = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
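# A minimal usage sketch (not part of the original file): the hub checkpoint name is
# real, but the sample sentence is illustrative only.
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("Hello world!")
#   # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
#   # matching build_inputs_with_special_tokens above.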
| 700
|
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
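# Worked examples (illustrative, not part of the original file):
#   hex_to_bin("AC")  -> 10101100    (0xAC == 172 == 0b10101100)
#   hex_to_bin("-fF") -> -11111111   (0xFF == 255 == 0b11111111)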
| 399
| 0
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input already follows the {"image": ..., "question": ...} format
            # (or is a list/generator of such dicts).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 127
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
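# A minimal usage sketch (illustrative; "image.jpg" is a placeholder path):
#
#   processor = BlipImageProcessor()   # defaults: resize to 384x384, rescale by 1/255
#   batch = processor(images=PIL.Image.open("image.jpg"), return_tensors="np")
#   batch["pixel_values"].shape        # -> (1, 3, 384, 384), normalized with the CLIP mean/std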
| 452
| 0
|
'''simple docstring'''
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
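# Worked trace for "(2 + 3)" (illustrative): '2' and '3' are pushed on the operand
# stack (RULE 1), '+' on the operator stack (RULE 2); on ')' both operands and the
# operator are popped and 5 is pushed back (RULE 4); RULE 5 returns the top, 5.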
| 714
|
'''simple docstring'''
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
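# Worked example (illustrative): maxpooling over a 4x4 matrix with size=2, stride=2
# takes the maximum of each non-overlapping 2x2 window.
#
#   maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
#   # -> array([[ 5.,  7.],
#   #           [13., 15.]])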
| 697
| 0
|
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib for this model
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib for this model
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 459
|
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
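# Worked example (illustrative): Atbash maps 'a'<->'z', 'b'<->'y', ... so
#   atbash("ABCDEFGH") -> "ZYXWVUTS"   and   atbash("123GGjj") -> "123TTqq"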
| 459
| 1
|
import sys
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    # Find the thirteen adjacent digits with the greatest product.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 448
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
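# Example invocation (illustrative; the script filename and all paths are placeholders):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_model.ckpt \
#       --big_bird_config_file config.json \
#       --pytorch_dump_path ./bigbird_pytorch \
#       --is_trivia_qa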
| 448
| 1
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)

    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
config_args.add_argument(
"--config_file" , type=_lowercase , default=_lowercase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_lowercase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_lowercase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_lowercase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
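# Example invocation (illustrative; TPU name and zone are placeholders):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --command "python train.py" --debug
#   # With --debug the assembled `gcloud compute tpus tpu-vm ssh ...` call is only printed.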
| 633
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 633
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
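# Worked examples (illustrative):
#   goldbach(26) -> [3, 23]   # two primes summing to an even number > 2
#   gcd(24, 36)  -> 12        # Euclidean algorithm above
#   kg_v(24, 36) -> 72        # least common multiple via prime factorizations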
| 71
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=5_0432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=2_4576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=1_0000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 71
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCamelCase : int = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
FAIRSEQ_LANGUAGE_CODES = UpperCamelCase  # alias for the language-code list defined on the line above


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
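# A minimal usage sketch (the checkpoint name is real; the sentence is illustrative):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # input_ids begin with the eng_Latn language code and end with </s>
#   # (with legacy_behaviour=True the language code follows </s> instead).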
| 37
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ) -> Any:
"""simple docstring"""
lowerCAmelCase = 1.5
lowerCAmelCase = int(factor * num_class_images )
lowerCAmelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=_SCREAMING_SNAKE_CASE )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
lowerCAmelCase = client.query(text=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowerCAmelCase = int(factor * num_images )
lowerCAmelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , )
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = tqdm(desc="""downloading real regularization images""" , total=_SCREAMING_SNAKE_CASE )
with open(f'{class_data_dir}/caption.txt' , """w""" ) as fa, open(f'{class_data_dir}/urls.txt' , """w""" ) as fb, open(
f'{class_data_dir}/images.txt' , """w""" ) as fc:
while total < num_class_images:
lowerCAmelCase = class_images[count]
count += 1
try:
lowerCAmelCase = requests.get(images["""url"""] )
if img.status_code == 200:
lowerCAmelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fb.write(images["""url"""] + """\n""" )
fc.write(f'{class_data_dir}/images/{total}.jpg' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _snake_case ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = argparse.ArgumentParser("""""" , add_help=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=_SCREAMING_SNAKE_CASE )
return parser.parse_args()
if __name__ == "__main__":
UpperCAmelCase = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
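# Example invocation of the script above (prompt and paths are illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200
# Alongside the downloaded jpgs it writes caption.txt, urls.txt and images.txt.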
| 433
| 0
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
A_ : Optional[int] = logging.getLogger()
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""-f""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
return args.f
class lowerCamelCase (A__ ):
def SCREAMING_SNAKE_CASE ( self : int ) -> None:
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__UpperCAmelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(__UpperCAmelCase )
| 616
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Tuple = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : Tuple = concatenate_datasets
A_ : List[str] = DownloadConfig
A_ : int = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : Optional[int] = DownloadConfig
A_ : List[Any] = DownloadMode
A_ : Any = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
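# A minimal usage sketch of the public API re-exported above; the dataset name
# is illustrative, and any Hub dataset with matching features would work.
from datasets import concatenate_datasets, load_dataset

train = load_dataset("imdb", split="train")
test = load_dataset("imdb", split="test")
combined = concatenate_datasets([train, test])  # pieces must share the same features
print(combined.num_rows)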
| 616
| 1
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__lowerCamelCase : Any = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__lowerCamelCase : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
if "://" in dataset_path:
SCREAMING_SNAKE_CASE_ : Dict = dataset_path.split("://" )[1]
return dataset_path
def _snake_case ( lowerCAmelCase : fsspec.AbstractFileSystem ):
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _snake_case ( lowerCAmelCase : fsspec.AbstractFileSystem , lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = not is_remote_filesystem(lowerCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm; it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCAmelCase ) , fs._strip_protocol(lowerCAmelCase ) )
else:
fs.mv(lowerCAmelCase , lowerCAmelCase , recursive=lowerCAmelCase )
def _snake_case ( ):
"""simple docstring"""
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : int = threading.Lock()
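# A quick demonstration (hypothetical URI) of the helpers defined above:
# extract_path_from_uri drops the protocol prefix, and is_remote_filesystem
# treats any fsspec filesystem whose protocol is not "file" as remote.
dataset_path = "s3://my-bucket/train"
stripped = dataset_path.split("://")[1]  # -> "my-bucket/train"
print(stripped)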
| 216
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Any = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ['''DPTFeatureExtractor''']
__lowerCamelCase : List[Any] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
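# The _LazyModule registered above keeps `import` cheap: a submodule is only
# loaded when one of its attributes is first accessed. A minimal sketch of the
# same idea, independent of the transformers implementation:
import importlib
import types

class LazyModule(types.ModuleType):
    """Defer importing submodules until an attribute is actually requested."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"DPTModel": "transformers.models.dpt.modeling_dpt"}

    def __getattr__(self, attr):
        # only reached when normal attribute lookup fails
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)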
| 216
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Tuple = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[Any] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : int = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : int = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Any = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[Any] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''flax''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""flax"""] )
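# Every class above is a placeholder: importing it always succeeds, but using it
# without flax installed raises through requires_backends. A minimal sketch of
# that guard, assuming availability can be checked with importlib:
import importlib.util

def requires_backends_sketch(obj, backends):
    # raise only when one of the required backends is genuinely unavailable
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the following backends: {', '.join(missing)}")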
| 160
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> Optional[Any]:
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Optional[int] = is_training
lowerCAmelCase__ : Optional[int] = use_auxiliary_loss
lowerCAmelCase__ : Optional[Any] = num_queries
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : List[Any] = min_size
lowerCAmelCase__ : Dict = max_size
lowerCAmelCase__ : Dict = num_labels
lowerCAmelCase__ : Any = mask_feature_size
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__UpperCAmelCase )
lowerCAmelCase__ : List[str] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5
).float()
lowerCAmelCase__ : List[str] = (torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long()
lowerCAmelCase__ : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self ) -> Any:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Tuple = output.encoder_hidden_states
lowerCAmelCase__ : Dict = output.pixel_decoder_hidden_states
lowerCAmelCase__ : List[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> int:
with torch.no_grad():
lowerCAmelCase__ : List[str] = MaskFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : List[str] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[Any] = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
def comm_check_on_output(__UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model(
pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__lowercase : Optional[int] = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__lowercase : List[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Any = MaskFormerModelTester(self )
lowerCAmelCase__ : int = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCAmelCase_ ( self ) -> Any:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCAmelCase_ ( self ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase__ : Dict = MaskFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Any = (self.model_tester.min_size,) * 2
lowerCAmelCase__ : Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ),
"""class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(),
}
lowerCAmelCase__ : Optional[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self ) -> Any:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ : Optional[int] = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : str = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ : Optional[Any] = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : int = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCAmelCase__ : int = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
lowerCAmelCase__ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase__ : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCAmelCase__ : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase__ : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCAmelCase = 1e-4
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : str = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.default_image_processor
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : Any = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : Any = model(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
lowerCAmelCase__ : List[Any] = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
lowerCAmelCase__ : Tuple = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : int = self.default_image_processor
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(**__UpperCAmelCase )
# masks_queries_logits
lowerCAmelCase__ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowerCAmelCase__ : Optional[int] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
lowerCAmelCase__ : Dict = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
# class_queries_logits
lowerCAmelCase__ : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : List[str] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase )
# masks_queries_logits
lowerCAmelCase__ : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowerCAmelCase__ : str = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
lowerCAmelCase__ : Union[str, Any] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
# class_queries_logits
lowerCAmelCase__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : List[str] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : List[str] = self.default_image_processor
lowerCAmelCase__ : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase )
lowerCAmelCase__ : int = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]]
lowerCAmelCase__ : int = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
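# A short usage sketch for the checkpoint exercised above; the post-processing
# call assumes the MaskFormerImageProcessor API of recent transformers releases.
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
# one (height, width) label map per input image
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]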
| 160
| 1
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :List[str] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCAmelCase_ ( self , _lowercase=0 )-> str:
UpperCamelCase_ = np.random.RandomState(_lowercase )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**_lowercase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_ = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**_lowercase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_ = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**_lowercase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_ = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**_lowercase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_ = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**_lowercase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_ = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**_lowercase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_ = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCamelCase_ = pipe(**_lowercase )
image_slice_a = output.images[0, -3:, -3:, -1]
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = 3 * [inputs.pop("prompt" )]
UpperCamelCase_ = pipe.tokenizer(
_lowercase , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="np" , )
UpperCamelCase_ = text_inputs["input_ids"]
UpperCamelCase_ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
UpperCamelCase_ = prompt_embeds
# forward
UpperCamelCase_ = pipe(**_lowercase )
image_slice_b = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = 3 * ["this is a negative prompt"]
UpperCamelCase_ = negative_prompt
UpperCamelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCamelCase_ = pipe(**_lowercase )
image_slice_a = output.images[0, -3:, -3:, -1]
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = 3 * [inputs.pop("prompt" )]
UpperCamelCase_ = []
for p in [prompt, negative_prompt]:
UpperCamelCase_ = pipe.tokenizer(
_lowercase , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="np" , )
UpperCamelCase_ = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
UpperCamelCase_ , UpperCamelCase_ = embeds
# forward
UpperCamelCase_ = pipe(**_lowercase )
image_slice_b = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
@property
def UpperCAmelCase_ ( self )-> Tuple:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = ort.SessionOptions()
UpperCamelCase_ = False
return options
def UpperCAmelCase_ ( self )-> Any:
# using the PNDM scheduler by default
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = "A painting of a squirrel eating a burger"
np.random.seed(0 )
UpperCamelCase_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = "open neural network exchange"
UpperCamelCase_ = np.random.RandomState(0 )
UpperCamelCase_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type="np" )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = "open neural network exchange"
UpperCamelCase_ = np.random.RandomState(0 )
UpperCamelCase_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type="np" )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = 0
def test_callback_fn(_lowercase , _lowercase , _lowercase ) -> None:
UpperCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase_ = latents[0, -3:, -3:, -1]
UpperCamelCase_ = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase_ = latents[0, -3:, -3:, -1]
UpperCamelCase_ = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
UpperCamelCase_ = False
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = "Andromeda galaxy in a bottle"
UpperCamelCase_ = np.random.RandomState(0 )
pipe(
prompt=_lowercase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowercase , callback=_lowercase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowercase , _lowercase )
assert pipe.safety_checker is None
UpperCamelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowercase )
UpperCamelCase_ = OnnxStableDiffusionPipeline.from_pretrained(_lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
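# Minimal standalone usage of the pipeline covered by the tests above; the
# provider and checkpoint match the ones the tests use.
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
image = pipe("an astronaut riding a horse", num_inference_steps=10).images[0]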
| 628
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = SwinvaConfig()
UpperCamelCase_ = swinva_name.split("_" )
UpperCamelCase_ = name_split[1]
if "to" in name_split[3]:
UpperCamelCase_ = int(name_split[3][-3:] )
else:
UpperCamelCase_ = int(name_split[3] )
if "to" in name_split[2]:
UpperCamelCase_ = int(name_split[2][-2:] )
else:
UpperCamelCase_ = int(name_split[2][6:] )
if model_size == "tiny":
UpperCamelCase_ = 9_6
UpperCamelCase_ = (2, 2, 6, 2)
UpperCamelCase_ = (3, 6, 1_2, 2_4)
elif model_size == "small":
UpperCamelCase_ = 9_6
UpperCamelCase_ = (2, 2, 1_8, 2)
UpperCamelCase_ = (3, 6, 1_2, 2_4)
elif model_size == "base":
UpperCamelCase_ = 1_2_8
UpperCamelCase_ = (2, 2, 1_8, 2)
UpperCamelCase_ = (4, 8, 1_6, 3_2)
else:
UpperCamelCase_ = 1_9_2
UpperCamelCase_ = (2, 2, 1_8, 2)
UpperCamelCase_ = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
UpperCamelCase_ = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
UpperCamelCase_ = 2_1_8_4_1
UpperCamelCase_ = "huggingface/label-files"
UpperCamelCase_ = "imagenet-22k-id2label.json"
UpperCamelCase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
UpperCamelCase_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase_ = 1_0_0_0
UpperCamelCase_ = "huggingface/label-files"
UpperCamelCase_ = "imagenet-1k-id2label.json"
UpperCamelCase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
UpperCamelCase_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
UpperCamelCase_ = img_size
UpperCamelCase_ = num_classes
UpperCamelCase_ = embed_dim
UpperCamelCase_ = depths
UpperCamelCase_ = num_heads
UpperCamelCase_ = window_size
return config
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Optional[Any]:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
UpperCamelCase_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
UpperCamelCase_ = "encoder." + name
if "attn.proj" in name:
UpperCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
UpperCamelCase_ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
UpperCamelCase_ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
UpperCamelCase_ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
UpperCamelCase_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
UpperCamelCase_ = "layernorm.weight"
if name == "norm.bias":
UpperCamelCase_ = "layernorm.bias"
if "head" in name:
UpperCamelCase_ = name.replace("head" , "classifier" )
else:
UpperCamelCase_ = "swinv2." + name
return name
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase_ = key.split("." )
UpperCamelCase_ = int(key_split[1] )
UpperCamelCase_ = int(key_split[3] )
UpperCamelCase_ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase_ = val[:dim, :]
UpperCamelCase_ = val[dim : dim * 2, :]
UpperCamelCase_ = val[-dim:, :]
else:
UpperCamelCase_ = val[:dim]
UpperCamelCase_ = val[
dim : dim * 2
]
UpperCamelCase_ = val[-dim:]
else:
UpperCamelCase_ = val
return orig_state_dict
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
"""simple docstring"""
UpperCamelCase_ = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
UpperCamelCase_ = get_swinva_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = SwinvaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase_ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase_ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
UpperCamelCase_ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
UpperCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
UpperCamelCase_ = timm_model(inputs["pixel_values"] )
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE :int = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
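# The qkv branch in convert_state_dict splits one fused (3*dim, dim) projection
# into separate query/key/value tensors. A standalone illustration on a dummy
# tensor:
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query = fused[:dim, :]
key = fused[dim : dim * 2, :]
value = fused[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)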
| 628
| 1
|
"""simple docstring"""
def _snake_case ( _snake_case : int ):
if not isinstance(_snake_case , int ) or _snake_case <= 0:
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(_snake_case )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
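# Worked check: for length=5 the comprehension evaluates n * (2n - 1) for
# n = 0..4, giving [0, 1, 6, 15, 28].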
| 637
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ : int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
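# Quick sanity check (the blinker oscillates with period 2):
#   new_generation(BLINKER) -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]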
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
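# Note: the pixel loop above indexes cells[y][x] while x ranges over the row count,
# so it is only valid for square grids (which GLIDER and BLINKER are).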
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 637
| 1
|
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> int:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors
    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
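# Note: unlike A*, this greedy best-first search orders nodes purely by the
# Manhattan-distance heuristic (f_cost ignores g_cost), so the returned path
# is not guaranteed to be the shortest one.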
| 422
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given Gregorian date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A century year is a leap year only when divisible by 400 (hence the != 0 check)
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
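# Worked example for 2020-10-24: century = 20, century_anchor = 2, centurian = 20,
# dooms_day = (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year, so
# day_anchor = DOOMSDAY_LEAP[9] = 3 and week_day = (6 + 24 - 3) % 7 = 6 -> 'Saturday'.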
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
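# ImageGPT represents images as sequences of color-cluster indices rather than raw
# pixels; the two clusters above keep the test fixture small (the released models
# use a much larger palette).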
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 562
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives the new context after removing HTML tokens and remaps the answer to it."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
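    # Each window keeps the question prefix (q_indices) and advances by
    # (max_length - doc_stride) tokens, so consecutive windows overlap by
    # (doc_stride - q_len) context tokens.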
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60 % of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCamelCase = load_dataset("""natural_questions""")
UpperCamelCase = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
UpperCamelCase = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
UpperCamelCase = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
UpperCamelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCamelCase = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
UpperCamelCase = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
| 562
| 1
|
from ..utils import DummyObject, requires_backends
# Standard dummy-object pattern for optional backends; MidiProcessor is the
# note_seq-gated object this file exposes in diffusers.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 524
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_config_save_pretrained(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 572
| 0
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
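    # PNDM runs in two phases: pseudo Runge-Kutta (prk) warm-up steps that build up
    # the residual history, followed by pseudo linear multistep (plms) steps for the
    # rest of the trajectory — hence the two separate timestep loops above.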
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 703
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 298
| 0
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size=256,
        mask_feature_size=256,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
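# A minimal usage sketch (relying on the fallback configs wired in above):
#   config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())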
| 373
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    # Reimplements CLIP preprocessing with torchvision ops on tensors so that
    # gradients can flow back through the image pipeline.
    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 373
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 526
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 526
| 1
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    # Sum of the proper divisors of n (all divisors except n itself)
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit: int = 10000) -> int:
    # Return the sum of all amicable numbers below the limit
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
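# Example amicable pair: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# so both 220 and 284 contribute to solution().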
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 287
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
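# With this config the processor resizes each frame so its shorter edge is 18 pixels
# and then center-crops to 18x18, which is what the shape assertions below expect.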
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def UpperCamelCase_ ( self ):
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_lowercase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_lowercase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowercase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self ):
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_lowercase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowercase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self ):
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_lowercase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowercase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
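A quick shape sketch of what the assertions above encode (all sizes here are hypothetical, chosen only for illustration): processed videos come back as a 5D tensor, and the only difference between the unbatched and batched calls is the leading dimension.

import torch

# (batch, num_frames, num_channels, crop_height, crop_width)
single = torch.zeros(1, 8, 3, 18, 18)   # unbatched call: one video becomes a batch of one
batched = torch.zeros(7, 8, 3, 18, 18)  # batched call: leading dim = number of input videos
assert single.shape[1:] == batched.shape[1:]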
| 287
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\\n\n'''

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 721
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Searches `item` in an ascending `sorted_collection`; returns its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive variant; `left` and `right` bound the slice being searched."""
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raises ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at position: {result}")
    else:
        print("Not found")
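Usage sketch (not part of the original module): interpolation search expects roughly uniformly distributed keys, which is what makes the probe-position estimate useful and gives O(log log n) probes on average.

data = [10, 30, 40, 45, 50, 66, 77, 93]
assert interpolation_search(data, 45) == 3
assert interpolation_search(data, 12) is None
assert interpolation_search_by_recursion(data, 77, 0, len(data) - 1) == 6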
| 564
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: a VQA processor requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra alpha channel is dropped, hence num_channels - 1)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
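For reference, the expected_hidden_dim checked throughout the tests above follows directly from how Pix2Struct flattens patches: every patch contributes patch_h * patch_w * channels pixel values, plus two slots for its (row, column) patch index.

patch_size = {"height": 16, "width": 16}
num_channels = 3
expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
print(expected_hidden_dim)  # 770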
| 138
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
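Usage sketch for the builder above (the file name is hypothetical): pickle a DataFrame to disk, then load it back through the packaged "pandas" loader, which reads it with pd.read_pickle and converts it to Arrow.

import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("frames.pkl")
ds = load_dataset("pandas", data_files="frames.pkl", split="train")
print(ds[0])  # {'text': 'a', 'label': 0}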
| 138
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
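Instantiation sketch: the config behaves like a regular BERT config plus the pruning knobs defined above.

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.model_type)   # masked_bert
print(config.hidden_size)  # 768 (inherited default from above)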
| 76
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """
    Check all submodules are properly registered in the main init of Transformers.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 76
| 1
|
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 21
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
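Example of the remapping performed by rename_key on a typical encoder attention key (the input key is illustrative):

print(rename_key("blocks.0.attn.proj.weight"))
# -> 'videomae.encoder.layer.0.attention.output.dense.weight'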
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 515
| 0
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over odd numbers; returns all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Returns the prime below `ceiling` that is the sum of the longest run of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Returns the smallest difference D = P(j) - P(i) such that sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b

    return -1
if __name__ == "__main__":
print(F"{solution() = }")
| 301
| 1
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
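Worked example: at 60 degrees, the transmitted intensity is cos^2(60°) = 1/4 of the input.

print(malus_law(100.0, 60.0))  # ~25.0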
| 58
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 394
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 605
|
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
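Usage sketch for the helpers defined below, as seen from application code (assumes the transformers package is importable):

# import transformers
# transformers.logging.set_verbosity_error()         # only report errors
# logger = transformers.logging.get_logger(__name__)
# logger.warning("suppressed at error verbosity")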
def _get_default_logging_level() -> int:
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Not meant to be accessed directly unless you are writing a custom
    transformers module.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs (disabled by default)."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every library logger's handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting of the library loggers to the default."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but silenced when the env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    '''simple docstring'''
self.warning(*_A , **_A )
__lowercase = warning_once
class a__:
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): # pylint: disable=unused-argument
"""simple docstring"""
lowerCAmelCase = args[0] if args else None
def __iter__( self):
"""simple docstring"""
return iter(self._iterator)
def __getattr__( self , __lowerCAmelCase):
"""simple docstring"""
def empty_fn(*__lowerCAmelCase , **__lowerCAmelCase): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self):
"""simple docstring"""
return self
def __exit__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
return
class a__:
'''simple docstring'''
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*__lowerCAmelCase , **__lowerCAmelCase)
else:
return EmptyTqdm(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowercase = _tqdm_cls()
def snake_case__ ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def snake_case__ ( ) -> Dict:
'''simple docstring'''
global _tqdm_active
lowerCAmelCase = True
hf_hub_utils.enable_progress_bars()
def snake_case__ ( ) -> Any:
'''simple docstring'''
global _tqdm_active
lowerCAmelCase = False
hf_hub_utils.disable_progress_bars()
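# Minimal usage sketch for the logging utilities above (the module name `logging_utils`
# is illustrative, not part of the original file):
#
#     from logging_utils import get_logger, set_verbosity_info, tqdm
#
#     logger = get_logger(__name__)
#     set_verbosity_info()
#     logger.info("visible at INFO level")
#     for _ in tqdm(range(3)):  # falls back to EmptyTqdm when bars are disabled
#         pass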
| 605
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
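# Sketch of what the _LazyModule indirection buys (names follow the module above):
# submodules are only imported when an attribute is first accessed, so e.g.
#
#     from transformers.models.rembert import RemBertConfig  # imports configuration_rembert only
#
# pays the torch/TF import cost only for the backends that are actually used.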
| 399
| 0
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below limit (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
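# Illustration (not part of the original solution): 220 and 284 form the classic
# amicable pair, since the proper divisors of 220 sum to 284 and vice versa.
if __name__ == "__main__":
    assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220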
| 169
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 169
| 1
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on HumanEval dataset."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing data."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    n_examples: Optional[int] = field(
        default=32768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
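# Minimal usage sketch (assumes transformers is installed; the flags shown are examples):
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args()  # e.g. --train_batch_size 8 --seq_length 2048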
| 379
|
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
UpperCAmelCase__ : Dict = set()
# Replace all the whitespace in our sentence
UpperCAmelCase__ : str = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowercase__ ) == 2_6
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
UpperCAmelCase__ : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase__ : List[Any] = True
elif char.isupper():
UpperCAmelCase__ : List[Any] = True
return all(lowercase__ )
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def snake_case_ ( ):
from timeit import timeit
UpperCAmelCase__ : Union[str, Any] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowercase__ ) )
print(timeit("is_pangram_faster()" , setup=lowercase__ ) )
print(timeit("is_pangram_fastest()" , setup=lowercase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
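# Illustration (not part of the original module): all three checks agree on
# another classic pangram and on a non-pangram.
if __name__ == "__main__":
    sentence = "Pack my box with five dozen liquor jugs"
    assert is_pangram(sentence) and is_pangram_faster(sentence) and is_pangram_fastest(sentence)
    assert not is_pangram("hello world")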
| 199
| 0
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        inputs = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**inputs)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        inputs = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**inputs)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 473
|
class Graph:
    """Weighted undirected graph with an adjacency-dict representation,
    used by Boruvka's minimum spanning tree algorithm below."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the MST is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of graph, built with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # drop the mirrored copy of each undirected edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
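# Illustration (not part of the original module): Boruvka's algorithm on a small graph.
# The triangle 1-2-3 with weights 1, 2, 3 keeps edges (1, 2) and (2, 3) in its MST.
if __name__ == "__main__":
    g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
    print(Graph.boruvka_mst(g))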
| 473
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to the argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 156
|
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """A distributed retriever built on top of the torch.distributed communication package."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 156
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
lowercase__ :Optional[Any] = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
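# Minimal usage sketch (downloads the public albert-base-v2 checkpoint; the example
# output is indicative only, since it depends on the SentencePiece model):
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     tokenizer.tokenize("Hello world")  # e.g. ['▁hello', '▁world'] (lowercased by default)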
| 717
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase__ :Optional[Any] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase__ :List[str] = 10
lowercase__ :Tuple = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index and track its duplicate cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes" so every element is close to one of them."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared in parallel, sharing the dataset via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate the dataset; return the filtered dataset and the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
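# Minimal usage sketch (assumes a `datasets.Dataset` with "content", "repo_name" and
# "path" columns; the dataset name below is illustrative):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)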
| 633
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 237
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237
| 1
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCamelCase__ = logging.getLogger(__name__)
class a__ ( UpperCamelCase_ ):
def __UpperCamelCase ( self : List[Any] ,a__ : Optional[int] ,a__ : Optional[int] ,a__ : int=None ,a__ : Any=None) -> str:
"""simple docstring"""
_lowerCAmelCase:Dict = self.layer[current_layer](a__ ,a__ ,head_mask[current_layer])
_lowerCAmelCase:Tuple = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , UpperCamelCase_ , )
class a__ ( UpperCamelCase_ ):
def __init__( self : int ,a__ : Any) -> Tuple:
"""simple docstring"""
super().__init__(a__)
_lowerCAmelCase:List[Any] = BertEncoderWithPabee(a__)
self.init_weights()
_lowerCAmelCase:Dict = 0
_lowerCAmelCase:Dict = 0
_lowerCAmelCase:Tuple = 0
_lowerCAmelCase:Tuple = 0
def __UpperCamelCase ( self : int ,a__ : Any) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:int = threshold
def __UpperCamelCase ( self : List[str] ,a__ : Dict) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Dict = patience
def __UpperCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = 0
_lowerCAmelCase:Dict = 0
def __UpperCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_lowerCAmelCase:Any = self.inference_layers_num / self.inference_instances_num
_lowerCAmelCase:Tuple = (
F'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
F' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(a__)
@add_start_docstrings_to_model_forward(a__)
def __UpperCamelCase ( self : List[str] ,a__ : List[str]=None ,a__ : Union[str, Any]=None ,a__ : Tuple=None ,a__ : Optional[Any]=None ,a__ : Optional[int]=None ,a__ : List[str]=None ,a__ : List[str]=None ,a__ : Tuple=None ,a__ : str=None ,a__ : Optional[Any]=None ,a__ : Dict=False ,) -> str:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
_lowerCAmelCase:Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase:Union[str, Any] = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
_lowerCAmelCase:int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase:Union[str, Any] = torch.ones(a__ ,device=a__)
if token_type_ids is None:
_lowerCAmelCase:Optional[Any] = torch.zeros(a__ ,dtype=torch.long ,device=a__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase:torch.Tensor = self.get_extended_attention_mask(a__ ,a__ ,a__)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:List[Any] = encoder_hidden_states.size()
_lowerCAmelCase:Any = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_lowerCAmelCase:Optional[Any] = torch.ones(a__ ,device=a__)
_lowerCAmelCase:int = self.invert_attention_mask(a__)
else:
_lowerCAmelCase:Dict = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase:Dict = self.get_head_mask(a__ ,self.config.num_hidden_layers)
_lowerCAmelCase:str = self.embeddings(
input_ids=a__ ,position_ids=a__ ,token_type_ids=a__ ,inputs_embeds=a__)
_lowerCAmelCase:List[Any] = embedding_output
if self.training:
_lowerCAmelCase:str = []
for i in range(self.config.num_hidden_layers):
_lowerCAmelCase:List[str] = self.encoder.adaptive_forward(
a__ ,current_layer=a__ ,attention_mask=a__ ,head_mask=a__)
_lowerCAmelCase:Optional[int] = self.pooler(a__)
_lowerCAmelCase:Optional[int] = output_layers[i](output_dropout(a__))
res.append(a__)
elif self.patience == 0: # Use all layers for inference
_lowerCAmelCase:Optional[int] = self.encoder(
a__ ,attention_mask=a__ ,head_mask=a__ ,encoder_hidden_states=a__ ,encoder_attention_mask=a__ ,)
_lowerCAmelCase:str = self.pooler(encoder_outputs[0])
_lowerCAmelCase:int = [output_layers[self.config.num_hidden_layers - 1](a__)]
else:
_lowerCAmelCase:int = 0
_lowerCAmelCase:Optional[Any] = None
_lowerCAmelCase:Optional[Any] = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
_lowerCAmelCase:Optional[int] = self.encoder.adaptive_forward(
a__ ,current_layer=a__ ,attention_mask=a__ ,head_mask=a__)
_lowerCAmelCase:Tuple = self.pooler(a__)
_lowerCAmelCase:Dict = output_layers[i](a__)
if regression:
_lowerCAmelCase:List[str] = logits.detach()
if patient_result is not None:
_lowerCAmelCase:Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
_lowerCAmelCase:Optional[int] = 0
else:
_lowerCAmelCase:Optional[int] = logits.detach().argmax(dim=1)
if patient_result is not None:
_lowerCAmelCase:List[str] = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(a__)):
patient_counter += 1
else:
_lowerCAmelCase:Union[str, Any] = 0
_lowerCAmelCase:Optional[Any] = logits
if patient_counter == self.patience:
break
_lowerCAmelCase:str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , UpperCamelCase_ , )
class a__ ( UpperCamelCase_ ):
def __init__( self : Optional[int] ,a__ : Tuple) -> Dict:
"""simple docstring"""
super().__init__(a__)
_lowerCAmelCase:str = config.num_labels
_lowerCAmelCase:str = BertModelWithPabee(a__)
_lowerCAmelCase:str = nn.Dropout(config.hidden_dropout_prob)
_lowerCAmelCase:Any = nn.ModuleList(
[nn.Linear(config.hidden_size ,self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(a__)
def __UpperCamelCase ( self : List[Any] ,a__ : List[str]=None ,a__ : Dict=None ,a__ : str=None ,a__ : List[str]=None ,a__ : Optional[Any]=None ,a__ : Optional[Any]=None ,a__ : Optional[int]=None ,) -> str:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.bert(
input_ids=a__ ,attention_mask=a__ ,token_type_ids=a__ ,position_ids=a__ ,head_mask=a__ ,inputs_embeds=a__ ,output_dropout=self.dropout ,output_layers=self.classifiers ,regression=self.num_labels == 1 ,)
_lowerCAmelCase:int = (logits[-1],)
if labels is not None:
_lowerCAmelCase:Any = None
_lowerCAmelCase:int = 0
for ix, logits_item in enumerate(a__):
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase:Tuple = MSELoss()
_lowerCAmelCase:List[Any] = loss_fct(logits_item.view(-1) ,labels.view(-1))
else:
_lowerCAmelCase:Union[str, Any] = CrossEntropyLoss()
_lowerCAmelCase:Dict = loss_fct(logits_item.view(-1 ,self.num_labels) ,labels.view(-1))
if total_loss is None:
_lowerCAmelCase:Tuple = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_lowerCAmelCase:List[Any] = (total_loss / total_weights,) + outputs
return outputs
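# A self-contained sketch of the PABEE patience rule implemented above: stop
# evaluating layers once `patience` consecutive per-layer classifier
# predictions agree. The random logits below are illustrative stand-ins, not
# outputs of the model above.
import torch

def layers_used(per_layer_logits, patience):
    patient_counter, prev_pred = 0, None
    for i, logits in enumerate(per_layer_logits, start=1):
        pred = logits.argmax(dim=-1)
        if prev_pred is not None and torch.equal(pred, prev_pred):
            patient_counter += 1
        else:
            patient_counter = 0
        prev_pred = pred
        if patient_counter == patience:
            return i  # early exit: deeper layers are never evaluated
    return len(per_layer_logits)

torch.manual_seed(0)
print(layers_used([torch.randn(1, 3) for _ in range(12)], patience=2))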
| 439
|
"""simple docstring"""
import base64
def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode('utf-8'))
def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
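# Quick round-trip check for the Base85 helpers above.
print(base85_decode(base85_encode('base 85')))  # base 85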
| 439
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class A__ :
def __init__( self ) -> None:
'''simple docstring'''
A_ = []
A_ = 0
A_ = 0
def snake_case_ ( self ) -> bool:
'''simple docstring'''
return self.head == self.tail
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
self.data.append(UpperCamelCase__ )
A_ = self.tail + 1
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.data[self.head]
A_ = self.head + 1
return ret
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.tail - self.head
def snake_case_ ( self ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class A__ :
def __init__( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = data
A_ = None
A_ = None
A_ = 1
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return self.data
def snake_case_ ( self ) -> MyNode | None:
'''simple docstring'''
return self.left
def snake_case_ ( self ) -> MyNode | None:
'''simple docstring'''
return self.right
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.height
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = data
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = node
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = node
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = height
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if node is None:
return 0
return node.get_height()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if a > b:
return a
return b
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
print("""left rotation node:""", node.get_data() )
A_ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
A_ = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase__ )
return ret
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
print("""right rotation node:""", node.get_data() )
A_ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
A_ = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase__ )
return ret
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
A_ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(UpperCAmelCase__ ) )
return right_rotation(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
A_ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(UpperCAmelCase__ ) )
return left_rotation(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> MyNode | None:
if node is None:
return MyNode(UpperCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left(), UpperCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an imbalance detected
A_ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
A_ = right_rotation(UpperCAmelCase__ )
else:
A_ = lr_rotation(UpperCAmelCase__ )
else:
node.set_right(insert_node(node.get_right(), UpperCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
A_ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
A_ = rl_rotation(UpperCAmelCase__ )
else:
A_ = left_rotation(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
return node
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
while True:
A_ = root.get_right()
if right_child is None:
break
A_ = right_child
return root.get_data()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
while True:
A_ = root.get_left()
if left_child is None:
break
A_ = left_child
return root.get_data()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> MyNode | None:
A_ = root.get_left()
A_ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
A_ = get_left_most(UpperCAmelCase__ )
root.set_data(UpperCAmelCase__ )
root.set_right(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
elif left_child is not None:
A_ = left_child
elif right_child is not None:
A_ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
if get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
A_ = left_rotation(UpperCAmelCase__ )
else:
A_ = rl_rotation(UpperCAmelCase__ )
elif get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
A_ = right_rotation(UpperCAmelCase__ )
else:
A_ = lr_rotation(UpperCAmelCase__ )
A_ = my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1
root.set_height(UpperCAmelCase__ )
return root
class A__ :
def __init__( self ) -> None:
'''simple docstring'''
A_ = None
def snake_case_ ( self ) -> int:
'''simple docstring'''
return get_height(self.root )
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
print("""insert:""" + str(UpperCamelCase__ ) )
A_ = insert_node(self.root , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
print("""delete:""" + str(UpperCamelCase__ ) )
if self.root is None:
print("""Tree is empty!""" )
return
A_ = del_node(self.root , UpperCamelCase__ )
def __str__(self) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
A_ = """"""
A_ = MyQueue()
q.push(self.root )
A_ = self.get_height()
if layer == 0:
return output
A_ = 0
while not q.is_empty():
A_ = q.pop()
A_ = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(UpperCamelCase__ )
q.push(UpperCamelCase__ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
A_ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , UpperCamelCase__ ) - 1:
A_ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCAmelCase__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
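# A stand-alone sketch of the "left rotation" above (which, following this
# snippet's naming, promotes the left child). Independent of the mangled
# class names in this dump.
class N:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def height(n):
    return 0 if n is None else 1 + max(height(n.left), height(n.right))

def left_rotation_sketch(node):
    new_root = node.left
    node.left, new_root.right = new_root.right, node
    return new_root

# A left-leaning chain 3 -> 2 -> 1 becomes a balanced tree rooted at 2.
root = left_rotation_sketch(N(3, left=N(2, left=N(1))))
print(root.data, height(root))  # 2 2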
| 288
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class A__ :
def __init__( self ) -> None:
'''simple docstring'''
A_ = []
A_ = 0
A_ = 0
def snake_case_ ( self ) -> bool:
'''simple docstring'''
return self.head == self.tail
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
self.data.append(UpperCamelCase__ )
A_ = self.tail + 1
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.data[self.head]
A_ = self.head + 1
return ret
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.tail - self.head
def snake_case_ ( self ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class A__ :
def __init__( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = data
A_ = None
A_ = None
A_ = 1
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return self.data
def snake_case_ ( self ) -> MyNode | None:
'''simple docstring'''
return self.left
def snake_case_ ( self ) -> MyNode | None:
'''simple docstring'''
return self.right
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.height
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = data
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = node
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = node
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = height
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if node is None:
return 0
return node.get_height()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if a > b:
return a
return b
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
print("""left rotation node:""", node.get_data() )
A_ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
A_ = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase__ )
return ret
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
print("""right rotation node:""", node.get_data() )
A_ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
A_ = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase__ )
return ret
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
A_ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(UpperCAmelCase__ ) )
return right_rotation(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> MyNode:
A_ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(UpperCAmelCase__ ) )
return left_rotation(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> MyNode | None:
if node is None:
return MyNode(UpperCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left(), UpperCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an imbalance detected
A_ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
A_ = right_rotation(UpperCAmelCase__ )
else:
A_ = lr_rotation(UpperCAmelCase__ )
else:
node.set_right(insert_node(node.get_right(), UpperCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
A_ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
A_ = rl_rotation(UpperCAmelCase__ )
else:
A_ = left_rotation(UpperCAmelCase__ )
A_ = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase__ )
return node
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
while True:
A_ = root.get_right()
if right_child is None:
break
A_ = right_child
return root.get_data()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
while True:
A_ = root.get_left()
if left_child is None:
break
A_ = left_child
return root.get_data()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> MyNode | None:
A_ = root.get_left()
A_ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
A_ = get_left_most(UpperCAmelCase__ )
root.set_data(UpperCAmelCase__ )
root.set_right(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
elif left_child is not None:
A_ = left_child
elif right_child is not None:
A_ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(UpperCAmelCase__, UpperCAmelCase__ ) )
if get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
A_ = left_rotation(UpperCAmelCase__ )
else:
A_ = rl_rotation(UpperCAmelCase__ )
elif get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
A_ = right_rotation(UpperCAmelCase__ )
else:
A_ = lr_rotation(UpperCAmelCase__ )
A_ = my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1
root.set_height(UpperCAmelCase__ )
return root
class A__ :
def __init__( self ) -> None:
'''simple docstring'''
A_ = None
def snake_case_ ( self ) -> int:
'''simple docstring'''
return get_height(self.root )
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
print("""insert:""" + str(UpperCamelCase__ ) )
A_ = insert_node(self.root , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
print("""delete:""" + str(UpperCamelCase__ ) )
if self.root is None:
print("""Tree is empty!""" )
return
A_ = del_node(self.root , UpperCamelCase__ )
def __str__(self) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
A_ = """"""
A_ = MyQueue()
q.push(self.root )
A_ = self.get_height()
if layer == 0:
return output
A_ = 0
while not q.is_empty():
A_ = q.pop()
A_ = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(UpperCamelCase__ )
q.push(UpperCamelCase__ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
A_ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , UpperCamelCase__ ) - 1:
A_ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCAmelCase__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 288
| 1
|
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree) -> None:  # in-order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree) -> int:
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree) -> bool:
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('''Tree is: ''')
    display(tree)
if __name__ == "__main__":
main()
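# A self-contained restatement of is_full_binary_tree above: every node must
# have either zero or two children.
def is_full(t) -> bool:
    if t is None:
        return True
    if (t.left is None) != (t.right is None):
        return False
    return is_full(t.left) and is_full(t.right)

full = Node(1)
full.left, full.right = Node(2), Node(3)
half = Node(1)
half.left = Node(2)
print(is_full(full), is_full(half))  # True False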
| 720
|
'''simple docstring'''
def multiply(a: int, b: int) -> int:
    '''Add-and-shift (Russian peasant) multiplication.'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def multiply_mod(a: int, b: int, c: int) -> int:
    '''Same add-and-shift multiply, reduced modulo c at every step.'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
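# Quick check of the add-and-shift helpers above.
print(multiply(5, 7))         # 35
print(multiply_mod(5, 7, 6))  # 5, i.e. (5 * 7) % 6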
| 357
| 0
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = IFImgaImgSuperResolutionPipeline
_UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
_UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'}
def snake_case ( self : List[str] ):
return self._get_superresolution_dummy_components()
def snake_case ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Tuple=0 ):
if str(__snake_case ).startswith('''mps''' ):
lowerCamelCase :Optional[int] = torch.manual_seed(__snake_case )
else:
lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCamelCase :Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def snake_case ( self : List[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case ( self : List[str] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case ( self : Optional[int] ):
self._test_save_load_local()
def snake_case ( self : List[str] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
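# A minimal sketch of the device-aware seeding used above: MPS does not accept
# device-bound generators, so the code falls back to the global CPU generator
# on that backend.
import torch

def make_generator(device, seed=0):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

print(torch.rand(2, generator=make_generator('cpu')))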
| 166
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
A__ = False
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case ( self : List[Any] ):
return 12
@property
def snake_case ( self : Union[str, Any] ):
return 12
@property
def snake_case ( self : int ):
return 32
@property
def snake_case ( self : Any ):
torch.manual_seed(0 )
lowerCamelCase :Optional[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def snake_case ( self : List[Any] ):
lowerCamelCase :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def snake_case ( self : Tuple ):
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__snake_case )
@property
def snake_case ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase :int = 12
lowerCamelCase :Dict = 12
lowerCamelCase :List[Any] = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowerCamelCase :Dict = TransformeraDModel(**__snake_case )
return model
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Any = '''cpu'''
lowerCamelCase :Tuple = self.dummy_vqvae
lowerCamelCase :List[str] = self.dummy_text_encoder
lowerCamelCase :Optional[Any] = self.dummy_tokenizer
lowerCamelCase :Tuple = self.dummy_transformer
lowerCamelCase :Tuple = VQDiffusionScheduler(self.num_embed )
lowerCamelCase :Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__snake_case )
lowerCamelCase :Any = VQDiffusionPipeline(
vqvae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , transformer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
lowerCamelCase :Tuple = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Any = '''teddy bear playing in the pool'''
lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(0 )
lowerCamelCase :Optional[int] = pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''np''' )
lowerCamelCase :List[str] = output.images
lowerCamelCase :Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(0 )
lowerCamelCase :Dict = pipe(
[prompt] , generator=__snake_case , output_type='''np''' , return_dict=__snake_case , num_inference_steps=2 )[0]
lowerCamelCase :Tuple = image[0, -3:, -3:, -1]
lowerCamelCase :Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowerCamelCase :List[Any] = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self : Any ):
lowerCamelCase :Any = '''cpu'''
lowerCamelCase :Tuple = self.dummy_vqvae
lowerCamelCase :Optional[int] = self.dummy_text_encoder
lowerCamelCase :str = self.dummy_tokenizer
lowerCamelCase :List[str] = self.dummy_transformer
lowerCamelCase :Any = VQDiffusionScheduler(self.num_embed )
lowerCamelCase :Tuple = LearnedClassifierFreeSamplingEmbeddings(
learnable=__snake_case , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowerCamelCase :List[Any] = VQDiffusionPipeline(
vqvae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , transformer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
lowerCamelCase :Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :List[str] = '''teddy bear playing in the pool'''
lowerCamelCase :str = torch.Generator(device=__snake_case ).manual_seed(0 )
lowerCamelCase :Optional[int] = pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''np''' )
lowerCamelCase :int = output.images
lowerCamelCase :Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(0 )
lowerCamelCase :Optional[Any] = pipe(
[prompt] , generator=__snake_case , output_type='''np''' , return_dict=__snake_case , num_inference_steps=2 )[0]
lowerCamelCase :str = image[0, -3:, -3:, -1]
lowerCamelCase :Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowerCamelCase :str = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : List[Any] ):
lowerCamelCase :str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowerCamelCase :List[Any] = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowerCamelCase :List[str] = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowerCamelCase :Tuple = torch.Generator(device=__snake_case ).manual_seed(0 )
lowerCamelCase :Optional[int] = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :Any = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
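# A minimal restatement of the slice-comparison pattern used in the tests
# above: check a 3x3 corner of the generated image against reference values.
# The zero arrays here are placeholders, not real pipeline outputs.
import numpy as np

image = np.zeros((1, 24, 24, 3))
expected_slice = np.zeros(9)
image_slice = image[0, -3:, -3:, -1]
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
print('corner slice within tolerance')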
| 166
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ :Union[str, Any] = logging.get_logger(__name__)
snake_case_ :Dict = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "deit"
def __init__( self : Tuple , snake_case : List[str]=768 , snake_case : Optional[int]=12 , snake_case : List[Any]=12 , snake_case : str=3072 , snake_case : List[Any]="gelu" , snake_case : List[Any]=0.0 , snake_case : Dict=0.0 , snake_case : Optional[int]=0.02 , snake_case : Optional[int]=1E-12 , snake_case : Any=224 , snake_case : Optional[Any]=16 , snake_case : Any=3 , snake_case : int=True , snake_case : Optional[int]=16 , **snake_case : Tuple , ) -> Any:
super().__init__(**snake_case )
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : int = layer_norm_eps
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : int = patch_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : Optional[Any] = qkv_bias
__UpperCAmelCase : List[Any] = encoder_stride
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = version.parse("1.11" )
@property
def lowerCamelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase__ ( self : List[Any] ) -> float:
return 1E-4
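# Illustrative use of the dynamic-axes mapping returned above: torch.onnx.export
# takes the same {input_name: {axis_index: axis_name}} shape. The export call is
# commented out because it needs a real model and dummy input.
from collections import OrderedDict

dynamic_axes = OrderedDict(
    [('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]
)
# torch.onnx.export(model, dummy_input, 'deit.onnx',
#                   input_names=list(dynamic_axes), dynamic_axes=dict(dynamic_axes))
print(dict(dynamic_axes))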
| 700
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : int | None = None ) -> int:
__UpperCAmelCase : str = value
__UpperCAmelCase : Node | None = None # Added in order to delete a node easier
__UpperCAmelCase : Node | None = None
__UpperCAmelCase : Node | None = None
def __repr__( self : str ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class a :
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : Node | None = None ) -> str:
__UpperCAmelCase : Optional[Any] = root
def __str__( self : str ) -> str:
return str(self.root )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : Node , snake_case : Node | None ) -> None:
if new_children is not None: # reset its kids
__UpperCAmelCase : List[str] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(snake_case ): # If it is the right children
__UpperCAmelCase : int = new_children
else:
__UpperCAmelCase : Tuple = new_children
else:
__UpperCAmelCase : List[Any] = new_children
def lowerCamelCase__ ( self : Optional[int] , snake_case : Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCamelCase__ ( self : int ) -> bool:
return self.root is None
def lowerCamelCase__ ( self : Optional[int] , snake_case : Optional[Any] ) -> None:
__UpperCAmelCase : int = Node(snake_case ) # create a new Node
if self.empty(): # if Tree is empty
__UpperCAmelCase : List[Any] = new_node # set its root
else: # Tree is not empty
__UpperCAmelCase : Tuple = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__UpperCAmelCase : Optional[int] = new_node # We insert the new node in a leaf
break
else:
__UpperCAmelCase : List[Any] = parent_node.left
else:
if parent_node.right is None:
__UpperCAmelCase : Optional[int] = new_node
break
else:
__UpperCAmelCase : List[str] = parent_node.right
__UpperCAmelCase : int = parent_node
def lowerCamelCase__ ( self : Optional[int] , *snake_case : List[Any] ) -> None:
for value in values:
self.__insert(snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Dict ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
__UpperCAmelCase : List[Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__UpperCAmelCase : Union[str, Any] = node.left if value < node.value else node.right
return node
def lowerCamelCase__ ( self : str , snake_case : Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
__UpperCAmelCase : Optional[Any] = self.root
if not self.empty():
while node.right is not None:
__UpperCAmelCase : str = node.right
return node
def lowerCamelCase__ ( self : int , snake_case : Node | None = None ) -> Node | None:
if node is None:
__UpperCAmelCase : str = self.root
if self.root is None:
return None
if not self.empty():
__UpperCAmelCase : List[str] = self.root
while node.left is not None:
__UpperCAmelCase : str = node.left
return node
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> None:
__UpperCAmelCase : List[str] = self.search(snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(snake_case , snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(snake_case , node.left )
else:
__UpperCAmelCase : Optional[int] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
__UpperCAmelCase : List[str] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowerCamelCase__ ( self : List[str] , snake_case : Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCamelCase__ ( self : str , snake_case : list , snake_case : Node | None ) -> None:
if node:
self.inorder(snake_case , node.left )
arr.append(node.value )
self.inorder(snake_case , node.right )
def lowerCamelCase__ ( self : Optional[int] , snake_case : int , snake_case : Node ) -> int:
__UpperCAmelCase : list[int] = []
self.inorder(snake_case , snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    '''simple docstring'''
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree() -> None:
    '''simple docstring'''
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print('''The value 6 exists''')
    else:
        print('''The value 6 doesn\'t exist''')
    if t.search(-1) is not None:
        print('''The value -1 exists''')
    else:
        print('''The value -1 doesn\'t exist''')
    if not t.empty():
        print('''Max Value: ''', t.get_max().value)  # type: ignore
        print('''Min Value: ''', t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
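# A compact, stand-alone version of the insert + in-order logic above,
# independent of the mangled class names in this dump.
class TNode:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None

def bst_insert(root, value):
    if root is None:
        return TNode(value)
    if value < root.value:
        root.left = bst_insert(root.left, value)
    else:
        root.right = bst_insert(root.right, value)
    return root

def inorder_values(node):
    return inorder_values(node.left) + [node.value] + inorder_values(node.right) if node else []

root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    root = bst_insert(root, v)
print(inorder_values(root))  # [1, 3, 4, 6, 7, 8, 10, 13, 14]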
| 266
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowercase_ :
__lowerCamelCase = PegasusConfig
__lowerCamelCase = {}
__lowerCamelCase = "gelu"
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=40 , __A=2 , __A=1 , __A=0 , ) -> Dict:
SCREAMING_SNAKE_CASE_ : Dict =parent
SCREAMING_SNAKE_CASE_ : List[str] =batch_size
SCREAMING_SNAKE_CASE_ : Tuple =seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] =is_training
SCREAMING_SNAKE_CASE_ : Any =use_labels
SCREAMING_SNAKE_CASE_ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] =hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] =num_attention_heads
SCREAMING_SNAKE_CASE_ : str =intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] =eos_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] =pad_token_id
SCREAMING_SNAKE_CASE_ : Any =bos_token_id
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE_ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE_ : List[Any] =prepare_pegasus_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _snake_case ( self , __A , __A ) -> int:
SCREAMING_SNAKE_CASE_ : int =TFPegasusModel(config=__A ).get_decoder()
SCREAMING_SNAKE_CASE_ : Dict =inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE_ : Any =input_ids[:1, :]
SCREAMING_SNAKE_CASE_ : Tuple =inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE_ : List[str] =inputs_dict['''head_mask''']
SCREAMING_SNAKE_CASE_ : List[str] =1
# first forward pass
SCREAMING_SNAKE_CASE_ : str =model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : Tuple =ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_ : Tuple =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE_ : List[Any] =tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE_ : List[str] =model(__A , attention_mask=__A )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE_ : Any =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE_ : List[str] =output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE_ : str =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1e-3 )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=None , ) -> List[Any]:
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : Optional[int] =tf.cast(tf.math.not_equal(UpperCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : List[Any] =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE_ : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase_ ( A , A , unittest.TestCase ):
__lowerCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__lowerCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Dict =TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =ConfigTester(self , config_class=__A )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : str =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowercase_ ( unittest.TestCase ):
__lowerCamelCase = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__lowerCamelCase = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__lowerCamelCase = "google/pegasus-xsum"
@cached_property
def _snake_case ( self ) -> str:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _snake_case ( self ) -> str:
SCREAMING_SNAKE_CASE_ : Optional[Any] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _snake_case ( self , **__A ) -> int:
SCREAMING_SNAKE_CASE_ : Dict =self.translate_src_text(**__A )
assert self.expected_text == generated_words
def _snake_case ( self , **__A ) -> Dict:
SCREAMING_SNAKE_CASE_ : Any =self.tokenizer(self.src_text , **__A , padding=__A , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE_ : Dict =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
SCREAMING_SNAKE_CASE_ : List[str] =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )
return generated_words
@slow
def _snake_case ( self ) -> List[str]:
self._assert_generated_batch_equal_expected()
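# A self-contained sketch of the padding-mask construction used above: 1 where
# a token differs from pad_token_id, 0 elsewhere. tf.int8 stands in for the
# mangled 'tf.inta' dtype in this dump.
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 7, 0, 0]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 0 0]]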
| 443
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=3 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=True , __A=1 / 255 , __A=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ : Any =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
SCREAMING_SNAKE_CASE_ : Dict =parent
SCREAMING_SNAKE_CASE_ : Optional[Any] =batch_size
SCREAMING_SNAKE_CASE_ : List[Any] =num_channels
SCREAMING_SNAKE_CASE_ : Optional[int] =min_resolution
SCREAMING_SNAKE_CASE_ : str =max_resolution
SCREAMING_SNAKE_CASE_ : int =do_resize
SCREAMING_SNAKE_CASE_ : Optional[int] =size
SCREAMING_SNAKE_CASE_ : str =do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] =image_mean
SCREAMING_SNAKE_CASE_ : Any =image_std
SCREAMING_SNAKE_CASE_ : Optional[int] =do_rescale
SCREAMING_SNAKE_CASE_ : Union[str, Any] =rescale_factor
SCREAMING_SNAKE_CASE_ : str =do_pad
def _snake_case ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _snake_case ( self , __A , __A=False ) -> Any:
if not batched:
SCREAMING_SNAKE_CASE_ : str =image_inputs[0]
if isinstance(__A , Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : List[Any] =int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE_ : List[Any] =self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE_ : Dict =int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE_ : str =self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE_ : str =self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE_ : Any =[]
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : str =max(__A , key=lambda __A : item[0] )[0]
SCREAMING_SNAKE_CASE_ : List[Any] =max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
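# A stand-alone restatement of the shortest-edge resize rule above: scale the
# image so its shorter side equals `shortest_edge`, preserving aspect ratio.
def expected_hw(w, h, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(expected_hw(30, 40))  # (24, 18)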
@require_torch
@require_vision
class lowercase_ ( A , unittest.TestCase ):
__lowerCamelCase = YolosImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ : str =YolosImageProcessingTester(self )
@property
def _snake_case ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : str =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , __A )
SCREAMING_SNAKE_CASE_ : Dict =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __A )
def _snake_case ( self ) -> List[str]:
pass
def _snake_case ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : str =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[str] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =self.image_processor_tester.get_expected_values(__A , batched=__A )
SCREAMING_SNAKE_CASE_ : str =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
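# A minimal standalone sketch (helper and argument names assumed, not taken from the
# processor's internals) of the aspect-preserving resize rule that the expected-value
# helpers above encode: the short side is scaled to `size` unless that would push the
# long side past `max_size`, in which case the long side binds.
def _expected_resize(height: int, width: int, size: int = 800, max_size: int = 1333) -> tuple:
    if max(height, width) / min(height, width) * size > max_size:
        size = int(round(max_size * min(height, width) / max(height, width)))
    if height < width:
        return (size, int(size * width / height))
    return (int(size * height / width), size)


# The 480x640 COCO image used in the slow tests above resizes to 800x1066.
assert _expected_resize(480, 640) == (800, 1066)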
| 443
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
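# Example invocation (a sketch; the script filename and checkpoint names below are
# illustrative):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2
#
# The saved directory can then be reloaded with
# FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2").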
| 272
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
A__ : Tuple = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
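# A small self-contained illustration of what PolynomialFeatures(degree=4) produces for a
# single-column input: the bias term plus the powers x, x^2, x^3, x^4, which is what lets
# a plain LinearRegression fit a quartic curve.
import numpy as np

_demo = PolynomialFeatures(degree=4).fit_transform(np.array([[2.0]]))
assert np.allclose(_demo, [[1.0, 2.0, 4.0, 8.0, 16.0]])  # [1, x, x**2, x**3, x**4]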
| 272
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 332
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
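# Example usage (a sketch; the default config/checkpoint paths assumed above must exist
# locally, and `device` is whatever device you load tensors on):
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model = load_vqgan(device)                       # ./model_checkpoints/vqgan_only.{yaml,pt}
#   x = torch.randn(1, 3, 256, 256, device=device)   # stand-in for a preprocessed image batch
#   xrec = reconstruct_with_vqgan(x, model)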
| 157
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
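# A minimal illustration of the round trip exercised above (values are arbitrary examples):
# _to_yaml_list() emits plain dicts and _from_yaml_list() rebuilds an equivalent SplitDict.
def _roundtrip_sketch():
    original = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    reloaded = SplitDict._from_yaml_list(original._to_yaml_list())
    assert reloaded["train"].num_examples == 42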
| 157
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_lowercase = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
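# A minimal usage sketch of the pipeline exercised above (the checkpoint name is the one
# used in the slow tests; generation needs a CUDA GPU with substantial VRAM):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_ids, num_inference_steps=25, output_type="pil").images[0]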
| 37
|
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # map an ADM-style ResBlock onto diffusers' ResnetBlock2D parameter names
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # split the fused qkv projection and map onto diffusers' attention parameter names
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Dict = strabool(args.class_cond)
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE__ : str = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE__ : Tuple = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
SCREAMING_SNAKE_CASE__ : Any = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Tuple = con_pt_to_diffuser(args.unet_path, unet_config)
SCREAMING_SNAKE_CASE__ : Tuple = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE__ : Any = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE__ : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
SCREAMING_SNAKE_CASE__ : List[Any] = CMStochasticIterativeScheduler(**scheduler_config)
SCREAMING_SNAKE_CASE__ : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
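# Example invocation (a sketch; the checkpoint filename is illustrative, but it must contain
# one of the substrings the dispatch above keys on, e.g. "imagenet64" plus "cd" or "ct"):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True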
| 298
| 0
|
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 22
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 22
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
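# Sanity check of the counting identity the search above relies on: an m x n grid contains
# T(m) * T(n) rectangles, where T(k) = k * (k + 1) / 2 is the k-th triangle number. A 3 x 2
# grid therefore contains 6 * 3 = 18 rectangles.
def _rectangle_count(m: int, n: int) -> int:
    return (m * (m + 1) // 2) * (n * (n + 1) // 2)


assert _rectangle_count(3, 2) == 18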
| 88
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (>50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 164
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A : Tuple = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None,
        bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300,
        retrieval_vector_size=768, retrieval_batch_size=8,
        dataset="wiki_dpr", dataset_split="train", index_name="compressed",
        index_path=None, passages_path=None, use_dummy_dataset=False,
        reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False,
        do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
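# A minimal sketch of composing a RagConfig from two sub-model configs (the model names
# are illustrative; any question-encoder/generator config pair works):
#
#   from transformers import AutoConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)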
| 356
| 1
|
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 362
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 362
| 1
|
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
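    # `[None, :, None, None]` reshapes the 3-element channel stats to (1, 3, 1, 1)
    # so they broadcast over image batches of shape (batch, channel, height, width).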
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
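    # e.g. with 100 batches per epoch (illustrative), resuming from "step_250"
    # gives starting_epoch = 2 and resume_step = 50.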
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            # Gather predictions and labels from all processes; `gather_for_metrics`
            # also drops samples duplicated by the distributed sampler.
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
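# Example launch (script name and paths are illustrative; expects a folder of
# `<label>_<i>.jpg` images such as the Oxford-IIIT Pets dataset):
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch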
| 319
|
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores an audio clip against a set of
    candidate text labels without task-specific fine-tuning.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
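    # The three returned dicts feed `preprocess`, `_forward`, and `postprocess`
    # respectively; this pipeline only routes user kwargs to `preprocess`.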
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
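# A minimal usage sketch (model checkpoint is illustrative; any CLAP-style
# audio-text model registered for this task should work):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])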
| 319
| 1
|