code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''OwlViTFeatureExtractor''']
_SCREAMING_SNAKE_CASE = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
import cva
import numpy as np
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
if k in (0.0_4, 0.0_6):
__lowercase = k
__lowercase = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ) -> str:
'''simple docstring'''
return str(self.k )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
__lowercase = cva.imread(_lowerCamelCase ,0 )
__lowercase , __lowercase = img.shape
__lowercase = []
__lowercase = img.copy()
__lowercase = cva.cvtColor(_lowerCamelCase ,cva.COLOR_GRAY2RGB )
__lowercase , __lowercase = np.gradient(_lowerCamelCase )
__lowercase = dx**2
__lowercase = dy**2
__lowercase = dx * dy
__lowercase = 0.0_4
__lowercase = self.window_size // 2
for y in range(_lowerCamelCase ,h - offset ):
for x in range(_lowerCamelCase ,w - offset ):
__lowercase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase = (wxx * wyy) - (wxy**2)
__lowercase = wxx + wyy
__lowercase = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,255 )
return color_img, corner_list
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = HarrisCorner(0.0_4, 3)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_SCREAMING_SNAKE_CASE = 2
class __lowercase :
'''simple docstring'''
def __init__(self ,*, # begin keyword-only arguments
_lowerCamelCase="<s>" ,_lowerCamelCase="<pad>" ,_lowerCamelCase="</s>" ,_lowerCamelCase="<unk>" ,_lowerCamelCase=None ,) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCamelCase )
__lowercase = self.add_symbol(_lowerCamelCase )
__lowercase = self.add_symbol(_lowerCamelCase )
__lowercase = self.add_symbol(_lowerCamelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCamelCase )
__lowercase = len(self.symbols )
def __eq__(self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return self.indices == other.indices
def __getitem__(self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__(self ) -> Optional[Any]:
'''simple docstring'''
return len(self.symbols )
def __contains__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
return sym in self.indices
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = cls()
d.add_from_file(_lowerCamelCase )
return d
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=1 ,_lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCamelCase )
self.count.append(_lowerCamelCase )
return idx
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return 0
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
try:
with open(_lowerCamelCase ,'''r''' ,encoding='''utf-8''' ) as fd:
self.add_from_file(_lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(_lowerCamelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCamelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(''' ''' ,1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(''' ''' ,1 )
else:
__lowercase = False
__lowercase = int(_lowerCamelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(_lowerCamelCase ) )
self.add_symbol(_lowerCamelCase ,n=_lowerCamelCase ,overwrite=_lowerCamelCase )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__lowercase = dict((re.sub(r'''@@$''' , '''''' , lowerCamelCase_ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , lowerCamelCase_ ), v) for k, v in d.items() )
__lowercase = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f"{k}</w>"]
__lowercase = d[k] # restore
return da
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] ):
# prep
if not os.path.exists(lowerCamelCase_ ):
raise ValueError(f"path {biogpt_checkpoint_path} does not exist!" )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
print(f"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase_ , '''checkpoint.pt''' )
if not os.path.isfile(lowerCamelCase_ ):
raise ValueError(f"path to the file {checkpoint_file} does not exist!" )
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = chkpt['''cfg''']['''model''']
# dicts
__lowercase = os.path.join(lowerCamelCase_ , '''dict.txt''' )
if not os.path.isfile(lowerCamelCase_ ):
raise ValueError(f"path to the file {dict_file} does not exist!" )
__lowercase = Dictionary.load(lowerCamelCase_ )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase_ )
__lowercase = os.path.join(lowerCamelCase_ , VOCAB_FILES_NAMES['''vocab_file'''] )
print(f"Generating {src_vocab_file} of {src_vocab_size} records" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCamelCase_ , ensure_ascii=lowerCamelCase_ , indent=lowerCamelCase_ ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase_ , '''bpecodes''' )
if not os.path.isfile(lowerCamelCase_ ):
raise ValueError(f"path to the file {bpecodes_file} does not exist!" )
__lowercase = os.path.join(lowerCamelCase_ , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(lowerCamelCase_ , lowerCamelCase_ )
# model config
__lowercase = os.path.join(lowerCamelCase_ , '''config.json''' )
__lowercase = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1E-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f"Generating {biogpt_model_config_file}" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCamelCase_ , ensure_ascii=lowerCamelCase_ , indent=lowerCamelCase_ ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1_0_2_4,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f"Generating {biogpt_tokenizer_config_file}" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCamelCase_ , ensure_ascii=lowerCamelCase_ , indent=lowerCamelCase_ ) )
# model
__lowercase = chkpt['''model''']
# remove unneeded keys
__lowercase = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
__lowercase = model_state_dict.pop(lowerCamelCase_ )
else:
__lowercase = model_state_dict.pop(lowerCamelCase_ )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase_ )
__lowercase = BioGptForCausalLM(lowerCamelCase_ )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase_ )
# save
__lowercase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
print(f"Generating {pytorch_weights_dump_path}" )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
print('''Conversion is done!''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 56
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Image processor that optionally resizes each image down to the nearest
    multiple of ``size_divisor`` and rescales pixel values by ``1/255``.

    NOTE(review): parameter and method names reconstructed from the call sites
    in ``preprocess`` (``self.resize`` / ``self.rescale``) and from the
    attribute reads; the obfuscated original reused a single name for every
    parameter, which is a SyntaxError.
    """

    # Keys this processor emits; read by the (project-level) base class.
    # NOTE(review): originally obfuscated to ``a : int`` — restored to the
    # conventional attribute name.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        """Store default preprocessing flags; extra kwargs go to the base class."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize ``image`` so that height and width become multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # Bare ``resize`` resolves to the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` via the module-level ``rescale`` transform."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Validate, optionally resize/rescale, and batch the input images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 56
| 1
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# Checkpoint variant to convert; read as ``MODEL`` by the argparse default
# below.  NOTE(review): the obfuscated original bound this to a throwaway
# name, leaving ``MODEL`` undefined at import time.
MODEL = '''base_with_context'''
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ):
    """Copy T5X notes-encoder weights into the PyTorch encoder model and return it.

    NOTE(review): obfuscation damage — both parameters share one name (a
    SyntaxError; presumably ``weights`` and ``model``), the body reads the
    undefined names ``weights`` / ``model`` / ``ly_weight`` /
    ``attention_weights``, and every ``__lowercase = nn.Parameter(...)``
    rebinds a throwaway local instead of assigning to the intended model
    attribute, so no weight is actually transferred.  Restore the attribute
    targets from the upstream diffusers conversion script before using.
    """
    # token embedding table
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
    # fixed (non-trainable) position embedding
    __lowercase = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowerCamelCase_ )
    # per-layer attention + MLP weights; T5X kernels are transposed for torch
    for lyr_num, lyr in enumerate(model.encoders ):
        __lowercase = weights[f"layers_{lyr_num}"]
        __lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        __lowercase = ly_weight['''attention''']
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    # final encoder layer norm
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple ):
    """Copy T5X continuous-inputs-encoder weights into the PyTorch encoder.

    NOTE(review): obfuscation damage — duplicated parameter name (SyntaxError)
    and every assignment target collapsed to the throwaway local
    ``__lowercase``; ``weights`` / ``model`` / ``ly_weight`` /
    ``attention_weights`` are read but never bound.  Not runnable as written.
    """
    # input projection for continuous (spectrogram) features
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    # fixed position embedding
    __lowercase = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowerCamelCase_ )
    for lyr_num, lyr in enumerate(model.encoders ):
        __lowercase = weights[f"layers_{lyr_num}"]
        __lowercase = ly_weight['''attention''']
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
    # final encoder layer norm
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
    """Copy T5X FiLM-decoder weights into the PyTorch decoder model.

    NOTE(review): obfuscation damage — duplicated parameter name (SyntaxError)
    and all assignment targets collapsed to ``__lowercase``; ``weights`` /
    ``model`` / ``ly_weight`` / ``attention_weights`` are read but never
    bound, so no weight is actually copied.  Restore targets from the
    upstream diffusers conversion script.
    """
    # time-embedding MLP
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    # fixed position embedding
    __lowercase = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowerCamelCase_ )
    # projection of the continuous (spectrogram) inputs
    __lowercase = nn.Parameter(
        torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        __lowercase = weights[f"layers_{lyr_num}"]
        # self-attention block with FiLM conditioning
        __lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
        __lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        __lowercase = ly_weight['''self_attention''']
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        # cross-attention block
        __lowercase = ly_weight['''MultiHeadDotProductAttention_0''']
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
        # MLP block with second FiLM layer
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        __lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __lowercase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    # final decoder norm and spectrogram output projection
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
    __lowercase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
    return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
    """Build a diffusers SpectrogramDiffusionPipeline from a T5X checkpoint
    and optionally save it.

    NOTE(review): obfuscation damage — the body reads the module-level
    ``args`` (set by the CLI block below) rather than its parameter, rebinds
    every result to ``__lowercase``, and calls ``load_notes_encoder`` /
    ``load_continuous_encoder`` / ``load_decoder``, which the obfuscation
    renamed away; not runnable as written.
    """
    __lowercase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    # ``jnp`` is bound to ``jax`` by ``import jax as jnp`` above, so this
    # resolves to ``jax.tree_util.tree_map`` (converts leaves to numpy).
    __lowercase = jnp.tree_util.tree_map(onp.array , lowerCamelCase_ )
    # gin overrides required to parse the training config for inference
    __lowercase = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    __lowercase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    __lowercase = inference.parse_training_gin_file(lowerCamelCase_ , lowerCamelCase_ )
    __lowercase = inference.InferenceModel(args.checkpoint_path , lowerCamelCase_ )
    __lowercase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    # the three torch sub-models, configured from the T5X module config
    __lowercase = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    __lowercase = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    __lowercase = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    # NOTE(review): these three helpers no longer exist under these names
    __lowercase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , lowerCamelCase_ )
    __lowercase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , lowerCamelCase_ )
    __lowercase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , lowerCamelCase_ )
    # pretrained MelGAN vocoder exported to ONNX
    __lowercase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    __lowercase = SpectrogramDiffusionPipeline(
        notes_encoder=lowerCamelCase_ , continuous_encoder=lowerCamelCase_ , decoder=lowerCamelCase_ , scheduler=lowerCamelCase_ , melgan=lowerCamelCase_ , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # NOTE(review): names reconstructed — the obfuscated original bound the
    # parser and the args to one throwaway name and then read the undefined
    # names ``parser``, ``args`` and ``main``.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
    # NOTE(review): ``type=bool`` is an upstream quirk — ``--save False`` still
    # parses as True; kept for behavioral compatibility.
    parser.add_argument(
        '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
    )
    parser.add_argument(
        '''--checkpoint_path''',
        default=f'''{MODEL}/checkpoint_500000''',
        type=str,
        required=False,
        help='''Path to the original jax model checkpoint.''',
    )
    args = parser.parse_args()
    # The converter entry point is the last ``_lowerCAmelCase`` defined above
    # (the pipeline builder); the original called the undefined name ``main``.
    _lowerCAmelCase(args)
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): names restored — the obfuscated original bound all four
# constants to one shared throwaway name while Node / AStar / the __main__
# block read HEURISTIC, grid, delta and TPosition.
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A grid cell in the A* search, carrying path cost and heuristic.

    NOTE(review): class and local names restored — the obfuscated original
    named this class ``__lowercase`` while AStar instantiates ``Node``, and
    collapsed all attribute assignments onto one throwaway local.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        """Store position, goal, accumulated cost and parent; compute f = g + h."""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        """Order nodes by total cost so ``list.sort`` yields the cheapest first."""
        return self.f_cost < other.f_cost
class AStar:
    """Unidirectional A* search over the module-level ``grid``.

    NOTE(review): class, method-local and attribute names restored — the
    obfuscated original named this class ``__lowercase`` (while the code
    below instantiates ``AStar``) and collapsed distinct locals onto
    undefined throwaway names.
    """

    def __init__(self, start, goal) -> None:
        """``start`` / ``goal`` are (y, x) positions; seed the open list with start."""
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand the cheapest open node until the target is reached.

        Returns the (y, x) path, or ``[self.start.pos]`` when no path exists.
        """
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        # keep the existing cheaper entry (the obfuscated
                        # original appended an undefined name here)
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent) -> list[Node]:
        """In-bounds, non-obstacle neighbours of ``parent`` per ``delta`` moves."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            # NOTE(review): goal coordinates are passed as
            # (target.pos_y, target.pos_x), preserved from the original —
            # TODO confirm the intended (goal_x, goal_y) ordering.
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> list[TPosition]:
        """Walk parent links back to the start and return the path start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar instances searching toward each other.

    NOTE(review): class and local names restored — the obfuscated original
    named this ``__lowercase`` while the __main__ block instantiates
    ``BidirectionalAStar``, and collapsed distinct locals onto undefined
    throwaway names.
    """

    def __init__(self, start, goal) -> None:
        """Forward search start→goal and backward search goal→start."""
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate expansions of both frontiers until they meet.

        Returns the joined (y, x) path, or ``[self.fwd_astar.start.pos]``
        when no path exists.
        """
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each direction aims at the other frontier's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> list[TPosition]:
        """Join the forward path with the reversed backward path (meeting point deduplicated)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # NOTE(review): names reconstructed — the obfuscated original bound every
    # result to one throwaway name and dropped the bidirectional search call.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()  # restored: the search invocation was missing
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 1
|
'''simple docstring'''
from PIL import Image
def mean_threshold(image):
    """Binarize a greyscale PIL image around its mean pixel value.

    Every pixel strictly brighter than the mean becomes 255, every other
    pixel becomes 0.  The image is modified in place and also returned.

    NOTE(review): function and local names restored — the obfuscated original
    was unnamed at the ``mean_threshold`` call site and read the undefined
    locals ``pixel``/``mean`` bounds.  ``image.size`` is unpacked as
    ``height, width`` as in the original, which matches PIL's
    ``(width, height)`` ordering only for square images — TODO confirm.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the mean of all pixel values.
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold each pixel against the mean.
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound the result to a throwaway
    # name while the next line reads ``image`` — restored.
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    image.save('''output_image_path''')
| 56
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSatForSequenceClassification model and load the S3PRL
    projector/classifier weights into it.

    NOTE(review): name and attribute targets reconstructed from the upstream
    s3prl conversion script; the obfuscated original collapsed every
    assignment onto one throwaway local, copying nothing.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSatForAudioFrameClassification model and load the
    S3PRL linear-classifier weights into it.

    NOTE(review): name and attribute targets reconstructed from the upstream
    s3prl conversion script; the obfuscated original copied nothing.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSatForXVector model and load the S3PRL speaker-
    verification (x-vector) weights into it.

    NOTE(review): name and attribute targets reconstructed from the upstream
    s3prl conversion script; the obfuscated original copied nothing.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL UniSpeechSat downstream checkpoint into a transformers
    model + feature extractor and save both to ``model_dump_path``.

    NOTE(review): local names, the ``convert_*`` helper names and the feature
    extractor flags (``return_attention_mask=True``, ``do_normalize=False``)
    were reconstructed from the upstream conversion script — the obfuscated
    original referenced undefined names throughout.
    """
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False )
    # Dispatch on the architecture recorded in the config.
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # NOTE(review): names reconstructed — the obfuscated original bound the
    # parser and the args to one throwaway name and then read the undefined
    # names ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 1
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find an approximate root of ``function`` via the secant method.

    Iterates x_{n+2} = x_{n+1} - f(x_{n+1}) / ((f(x_{n+1}) - f(x_n)) / (x_{n+1} - x_n))
    starting from ``x0`` and ``x1`` until two successive iterates differ by
    less than 1e-5.

    Raises ZeroDivisionError when the secant becomes degenerate (equal
    abscissae or equal ordinates), which would divide by zero.

    NOTE(review): name and locals restored — the obfuscated original gave all
    three parameters one name (a SyntaxError) and read undefined locals.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('''float division by zero, could not find root''' )
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    """Demo function for the secant method: f(x) = x**3 - 2*x - 5.

    NOTE(review): renamed from the obfuscated ``_lowerCAmelCase`` to match
    the ``intersection(f, 3, 3.5)`` call site below.
    """
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
    # Demo: print the root of x**3 - 2*x - 5 (~2.0945515) found by the secant
    # method from the starting pair (3, 3.5).  NOTE(review): relies on the
    # functions above being named ``intersection`` and ``f`` at module level.
    print(intersection(f, 3, 3.5))
| 56
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers and tables used by the tfds→datasets conversion below.
# NOTE(review): names restored — the obfuscated original bound all four
# constants to one shared throwaway name while the ConvertCommand class reads
# HIGHLIGHT_MESSAGE_PRE, TO_HIGHLIGHT and TO_CONVERT.
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''

HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''

# Substrings whose presence marks a line for manual review.
TO_HIGHLIGHT = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
    """Factory wired into ``set_defaults(func=...)``: build the convert
    command from parsed CLI args.

    NOTE(review): obfuscation damage — the body reads the undefined names
    ``ConvertCommand`` (the class below is named ``__lowercase``) and
    ``args`` (the parameter was renamed), so this raises NameError as
    written.
    """
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
    """CLI command that converts TensorFlow Datasets scripts into HuggingFace
    Datasets scripts (``datasets-cli convert``).

    NOTE(review): obfuscation damage throughout — distinct locals and
    instance attributes were collapsed onto ``__lowercase`` while later code
    reads the originals (``parser``/``train_parser``, ``self._tfds_path``,
    ``abs_tfds_path``, ``out_line``, ``match``, etc.), and ``__init__`` gives
    both positional parameters the same name (a SyntaxError).  Not runnable
    as written; flagged spots are marked below.
    """

    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ) -> Any:
        """Register the ``convert`` sub-command and its two required arguments.

        NOTE(review): the parameter is the subparser registry, but the body
        reads the undefined names ``parser`` and ``train_parser``.
        """
        __lowercase = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=_lowerCamelCase )

    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
        """Store logger, tfds source path and target datasets directory.

        NOTE(review): duplicated parameter names (SyntaxError); assignments
        were originally to ``self._logger`` / ``self._tfds_path`` /
        ``self._datasets_directory``, which ``run`` reads.
        """
        __lowercase = get_logger('''datasets-cli/converting''' )
        __lowercase = tfds_path
        __lowercase = datasets_directory

    def _UpperCAmelCase (self ) -> str:
        """Convert every eligible ``.py`` file under the tfds path, rewriting
        tfds idioms to datasets idioms, copying builders into per-dataset
        directories and utility modules alongside them.

        NOTE(review): every ``__lowercase =`` below originally bound a
        distinct local (abs_tfds_path, abs_datasets_path, utils_files,
        with_manual_update, imports_to_builder_map, file_names, lines,
        out_lines, tfds_imports, is_builder, needs_manual_update, out_line,
        match, dataset_name, output_dir, output_file, dest_folder), and the
        ``filter`` lambda's parameter ``e`` was renamed away — not runnable
        as written.
        """
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        __lowercase = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.listdir(_lowerCamelCase )
        else:
            __lowercase = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}" )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
                __lowercase = f.readlines()
            __lowercase = []
            __lowercase = False
            __lowercase = False
            __lowercase = []
            for line in lines:
                __lowercase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __lowercase = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    __lowercase = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    __lowercase = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    __lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Line mentions a tfds feature with no direct equivalent:
                    # surround it with conflict markers for manual review.
                    __lowercase = True
                    __lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
                    out_lines.append(_lowerCamelCase )
                    out_lines.append(_lowerCamelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        __lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        __lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        __lowercase = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __lowercase = True
                out_lines.append(_lowerCamelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __lowercase = f_name.replace('''.py''' ,'''''' )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(_lowerCamelCase )
            if needs_manual_update:
                with_manual_update.append(_lowerCamelCase )
            with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(_lowerCamelCase )
            self._logger.info(f"Converted in {output_file}" )
        # Move utility modules next to the builder that imports them.
        for utils_file in utils_files:
            try:
                __lowercase = os.path.basename(_lowerCamelCase )
                __lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f"Moving {dest_folder} to {utils_file}" )
                shutil.copy(_lowerCamelCase ,_lowerCamelCase )
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 1
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] ):
    """Deserialize a Flax checkpoint file and load its weights into a PyTorch
    model via the loader defined below.

    NOTE(review): obfuscation damage — both parameters share one name (a
    SyntaxError; presumably ``model`` and the checkpoint path), the error
    message reads the undefined local ``model_file``, and the final call
    targets ``load_flax_weights_in_pytorch_model``, a name the obfuscation
    renamed away.  The git-lfs check below exists because a pointer file
    starts with the text ``version``.
    """
    try:
        with open(lowerCamelCase_ , '''rb''' ) as flax_state_f:
            __lowercase = from_bytes(lowerCamelCase_ , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            # Re-open as text: a git-lfs pointer file starts with "version".
            with open(lowerCamelCase_ ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.''' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(lowerCamelCase_ , lowerCamelCase_ )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a Flax parameter tree into `pt_model` and return it.

    Renames/transposes Flax-style weights (``kernel``/``scale`` leaves, ``_N`` module
    suffixes) to their PyTorch equivalents, then calls ``pt_model.load_state_dict``.
    Warns about unexpected and missing keys instead of failing on them.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax stores HWIO, PyTorch expects OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernel: transpose to PyTorch Linear layout
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            # rename `..._N` Flax module suffixes to PyTorch's `.N` submodule indexing
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
| 56
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module logger plus the model-type registry surfaced in ModelArguments help text.
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        # Inlined model-type listing so this class does not depend on a separate constant.
        metadata={
            "help": "If training from scratch, pass a model type from the list: "
            + ", ".join(conf.model_type for conf in MODEL_WITH_LM_HEAD_MAPPING.keys())
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by `args`.

    Args:
        args: data arguments (file paths, block size, line-by-line/MLM flags).
        tokenizer: tokenizer used to encode the text.
        evaluate: when True, build the evaluation dataset instead of training data.
        cache_dir: optional cache directory for the tokenized TextDataset.
    """

    def _dataset(file_path, ref_path=None):
        # One dataset for a single file; dispatches on the line-by-line flag.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Glob pattern: concatenate one dataset per matched file.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Train/evaluate a language model (CLM, MLM, whole-word-mask MLM or XLNet PLM).

    Returns:
        dict: evaluation results (e.g. {"perplexity": ...}) — empty if --do_eval is off.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer.
    # Distributed training: the .from_pretrained methods guarantee that only one
    # local process can concurrently download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        # XLNet trains with permutation language modeling.
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # Entry point for xla_spawn (TPUs): each spawned process just runs main().
    main()


if __name__ == "__main__":
    main()
| 56
| 1
|
'''simple docstring'''
import os
def solution() -> int:
    """Project Euler 22: sum of (alphabetical value of name) * (1-based rank) over sorted names.

    Reads ``p022_names.txt`` located next to this script; each name's score is the
    sum of its letters' alphabet positions (A=1), multiplied by the name's 1-based
    position after sorting alphabetically.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # ord('A') is 65, so A..Z -> 1..26
        total_score += (i + 1) * name_score
        name_score = 0  # reset the per-name accumulator before the next name
    return total_score


if __name__ == "__main__":
    print(solution())
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Import structure consumed by `_LazyModule` below: submodule name -> exported names.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the modeling classes when torch is installed.
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def shape_list(tensor):
    """Return the shape of `tensor` as a Python list.

    Uses static dimensions where known; falls back to dynamic (`tf.shape`) entries
    for unknown dimensions. NumPy arrays return their plain shape list.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        # Fully unknown static shape: return the dynamic shape tensor as-is.
        return dynamic

    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    """tf.nn.softmax with a tiny additive epsilon on the logits.

    The 1e-9 shift is numerically negligible but avoids degenerate behavior on
    some backends; results are effectively identical to plain softmax.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Simplified functional layer norm over a single axis.

    Designed to duplicate the functionality of PyTorch nn.functional.layer_norm
    when this is needed to port models in Transformers. Only 1D weight/bias and a
    single integer axis are supported.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """Replicate the behavior of torch.flatten in TF: collapse dims start_dim..end_dim.

    Negative dimensions are counted from the end; if the range is empty the input
    is returned unchanged.
    """
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask):
    """Expand a 2D/3D encoder attention mask to 4D and convert it to additive form.

    Positions to attend (mask == 1) map to 0; masked positions map to the dtype's
    most-negative value, suitable for adding to attention scores.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids"):
    """Graph-time assertion that every id in `tensor` is strictly below `embed_dim`.

    Raises via tf.debugging.assert_less with a message naming the offending tensor,
    which usually points at a tokenizer/model vocab mismatch.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save a list of (typically string) attributes under `group.attrs`, chunking if needed.

    HDF5 object headers are limited to 64512 bytes, so oversized attribute lists
    are split across numbered keys ``name0``, ``name1``, ... Raises RuntimeError if
    any single item already exceeds the limit (chunking could not help then).
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load a (possibly chunked) attribute list saved by `save_attributes_to_hdf5_group`.

    Reads either the single key `name` or the numbered chunks ``name0``, ``name1``,
    ..., decoding any bytes entries to UTF-8 strings.
    """
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Append a trailing axis to every rank-1 tf.Tensor in a nested structure.

    Non-tensor leaves and tensors of other ranks are returned unchanged.
    """

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of GLPNImageProcessor kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a deprecation warning, then behave exactly like GLPNImageProcessor.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56
| 1
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# NOTE(review): the three comma-separated targets all rebind the same name, so only the
# last assignment survives — these were presumably three distinct availability flags
# originally; confirm against the upstream source.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False, False, False
@dataclass
class Audio:
    """Audio feature: stores either a file path or encoded bytes, and decodes examples
    into ``{"path", "array", "sampling_rate"}`` dicts.

    Attributes:
        sampling_rate: target sampling rate; if set, decoding resamples to it.
        mono: whether decoding downmixes the signal to a single channel.
        decode: whether decoding is enabled for this feature.
        id: optional feature id.
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode a path, raw bytes, or an array/path/bytes dict into the storage format."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        """Decode a stored example into ``{"path", "array", "sampling_rate"}``.

        `token_per_repo_id` optionally maps Hub repo ids to auth tokens for
        reading remote files.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                # Look up an auth token when the path points at a Hub dataset repo.
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into plain bytes/path Values; only allowed when decoding is off."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast Arrow storage (string, binary or struct) into the Audio struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Raw array structs need re-encoding into wav bytes first.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        """Embed file contents into the storage: read each path's bytes, keep only basenames."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """Undirected weighted graph with Boruvka's minimum-spanning-tree algorithm.

    Nodes are the integers ``0..num_of_nodes-1``; edges are ``[u, v, weight]``
    triples. ``m_component`` maps each node to its current component representative.
    """

    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node, v_node, weight) -> None:
        """Record an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        """Return the representative of the component containing `u_node`."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """Re-point every node at its component representative (full path compression)."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the components of `u_node` and `v_node`, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print a minimum spanning tree using Boruvka's algorithm."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge = [-1] * self.m_num_of_nodes

        # Every node starts as its own component of size 1.
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # Find the cheapest edge leaving each component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add every selected edge that still joins two distinct components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def _lowerCAmelCase( ):
    """Intentionally empty placeholder; the doctest runner below is the real
    entry point of this module when executed as a script."""
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase(input_str: str, use_pascal: bool = False ) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word too (PascalCase).

    Returns:
        The converted string.

    Raises:
        ValueError: if ``input_str`` is not a string or ``use_pascal`` is not
            a boolean.

    BUG FIX: the original declared both parameters with the same obfuscated
    name (a SyntaxError) and checked ``isinstance(x, x)`` (a TypeError); the
    intended parameter names and type checks are restored.
    """
    if not isinstance(input_str, str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal, bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split('''_''' )
    # camelCase keeps the first word untouched; PascalCase capitalizes all.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 56
|
'''simple docstring'''
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

# NOTE(review): the original script assigned the model/generators to a single
# reused obfuscated name and called nonexistent layers (``ConvaD``,
# ``MaxPoolingaD``), so every later reference raised NameError/AttributeError.
# The intended variable names and Keras layer names are restored below.
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='''relu'''))
    classifier.add(layers.Dense(units=1, activation='''sigmoid'''))

    # Compiling the CNN
    classifier.compile(
        optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    # NOTE(review): fit_generator is deprecated in TF2; `classifier.fit` is the
    # modern equivalent — kept as-is to preserve the original behavior.
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('''cnn.h5''')

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = '''Normal'''
    if result[0][0] == 1:
        prediction = '''Abnormality detected'''
| 56
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
        ),
        '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''squeezebert/squeezebert-uncased''': 512,
    '''squeezebert/squeezebert-mnli''': 512,
    '''squeezebert/squeezebert-mnli-headless''': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}


class __lowercase ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) SqueezeBERT tokenizer.

    BUG FIX: the original collapsed the class attributes to repeated
    ``a = ...`` rebindings, declared duplicate parameter names (SyntaxError),
    and collapsed all method names; the conventional fast-tokenizer layout is
    restored.  The base class ``lowerCAmelCase__`` was undefined — the
    imported ``PreTrainedTokenizerFast`` is the intended base.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the
        # constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''', do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None ):
        """Add [CLS]/[SEP] around one sequence (or a pair)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None ) -> List[int]:
        """Return 0s for the first sequence (+specials) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_a + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None ) -> Tuple[str]:
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
| 56
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
# Public API of this utils package; restored to its intended name `__all__`
# (the original bound the list to a throwaway obfuscated name, so `import *`
# did not export these symbols).
__all__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> public symbols, consumed lazily by _LazyModule.
# BUG FIX: the original assigned this dict (and the optional-dependency
# additions, and the final _LazyModule result) to a throwaway obfuscated
# name, so the `_import_structure` referenced at the bottom was undefined and
# the lazy module was never installed in sys.modules.
_import_structure = {
    '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
    '''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_perceiver'''] = ['''PerceiverFeatureExtractor''']
    _import_structure['''image_processing_perceiver'''] = ['''PerceiverImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_perceiver'''] = [
        '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PerceiverForImageClassificationConvProcessing''',
        '''PerceiverForImageClassificationFourier''',
        '''PerceiverForImageClassificationLearned''',
        '''PerceiverForMaskedLM''',
        '''PerceiverForMultimodalAutoencoding''',
        '''PerceiverForOpticalFlow''',
        '''PerceiverForSequenceClassification''',
        '''PerceiverLayer''',
        '''PerceiverModel''',
        '''PerceiverPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''gpt-neox-20b''': 2048,
}


class __lowercase ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) GPT-NeoX tokenizer.

    BUG FIX: the original collapsed class attributes to repeated ``a = ...``
    rebindings, declared duplicate parameter names (SyntaxError), and lost
    the assignments inside ``__init__``; the conventional layout is restored.
    The undefined base ``lowerCAmelCase__`` is the imported
    ``PreTrainedTokenizerFast``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its saved state disagrees with
        # the requested add_prefix_space.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None ) -> Tuple[str]:
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids(self, conversation ) -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        truncating (from the left) to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 56
| 1
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative MLP linear layer of ``model`` (used below to
    check the class of quantized weights).  The name is grounded by the
    existing call site in the memory-footprint test."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    # BLOOM-style models name the second MLP projection `dense_4h_to_h`.
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn


class LoRALayer(nn.Module):
    """Wrap a linear module with a low-rank (LoRA-style) trainable adapter.

    Only the adapter is meant to be trained; used by the training test below.
    """

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        # Frozen base output plus trainable adapter output.
        return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # NOTE(review): the obfuscation had collapsed every attribute to `a` and
    # every method to `_UpperCAmelCase`; names are restored from the values
    # and the surviving `self.<name>` references.
    model_name = "bigscience/bloom-1b7"
    # Constant values: expected fp16 / 4-bit memory-footprint ratio.
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map='''auto''')
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='''auto''')

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, '''quantization_config'''))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()
        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        output_sequences = self.model_4bit.generate(input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # Serializing 4-bit models is not supported.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=quantization_config, load_in_4bit=True, device_map='''auto''', bnb_4bit_quant_type='''nf4''',)

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to('''cpu''')
        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device('''cuda:0'''))
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to('''cpu''')
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        # keep_in_fp32 modules must stay float32 after 4-bit loading.
        model = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''', load_in_4bit=True, device_map='''auto''')
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = '''t5-small'''
        cls.dense_act_model_name = '''google/flan-t5-small'''  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = '''Translate in German: Hello, my dog is cute'''

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map='''auto''')
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map='''auto''')
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''').to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = '''bigscience/bloom-560m'''
        self.seq_to_seq_name = '''t5-small'''
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map='''auto''')
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='''auto''')
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='''auto''')
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map='''auto''')

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            '''text-generation''', model=self.model_name, model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS,)
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['''generated_text'''], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='''balanced''')
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors='''pt''')
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = '''facebook/opt-350m'''
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version('''bitsandbytes''')) < version.parse('''0.37.0'''):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        # Step 3: dummy batch
        batch = self.tokenizer('''Test batch ''', return_tensors='''pt''').to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 56
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Citation / description / usage strings for the GLUE metric.  Restored to the
# names (_CITATION, _DESCRIPTION, _KWARGS_DESCRIPTION) that the Metric class
# below already references; the original rebound all three to one throwaway
# obfuscated name.
_CITATION = '''\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
'''

_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''

_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:
    >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\')  # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0}
    >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\')  # \'mrpc\' or \'qqp\'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0, \'f1\': 1.0}
    >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {\'pearson\': 1.0, \'spearmanr\': 1.0}
    >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Expects array-like inputs supporting elementwise ``==`` and ``.mean()``
    (numpy arrays).  Restored to the name the module's other functions
    already call; the original shadowed three functions under one name.
    """
    return float((preds == labels).mean() )
def acc_and_fa(preds, labels):
    """Return accuracy and F1 for the GLUE subsets that use both (mrpc, qqp).

    Restored to the name the Metric class below already calls.
    NOTE(review): ``fa_score`` is this module's imported alias for sklearn's
    f1_score — confirm the import line binds it correctly.
    """
    acc = simple_accuracy(preds, labels )
    fa = float(fa_score(y_true=labels, y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations (GLUE stsb regression subset).

    Restored to the name the Metric class below already calls.
    """
    pearson_corr = float(pearsonr(preds, labels )[0] )
    spearman_corr = float(spearmanr(preds, labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    """GLUE metric: dispatches to the appropriate scorer for each GLUE subset.

    BUG FIX: the original collapsed both methods to one name; the
    ``datasets.Metric`` hook names (``_info``/``_compute``) are restored.
    """

    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                } ),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''',)

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 7 , lowerCamelCase_ : int = 1_0_0_0_0_0_0 ):
__lowercase = 0
__lowercase = 1
for current_denominator in range(1 , limit + 1 ):
__lowercase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__lowercase = current_numerator
__lowercase = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 56
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
# Emit INFO-level logs so checkpoint-conversion progress is visible on the console.
logging.set_verbosity_info()
# Module-level logger (obfuscated name; conventionally `logger`).
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert a metaseq/fairseq OPT checkpoint into a HF `OPTModel` and save it.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config: optional path/name of an `OPTConfig`; a default config is used when None.
    """
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        opt_config = OPTConfig.from_pretrained(config)
    else:
        opt_config = OPTConfig()
    # Checkpoints are stored in half precision; build the model the same way.
    model = OPTModel(opt_config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias used by the CLI entry point below.
convert_opt_checkpoint = _lowerCAmelCase

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
__lowercase = len(lowerCamelCase_ )
for i in range(length - 1 ):
__lowercase = i
for k in range(i + 1 , lowerCamelCase_ ):
if collection[k] < collection[least]:
__lowercase = k
if least != i:
__lowercase , __lowercase = (collection[i], collection[least])
return collection
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input('''Enter numbers separated by a comma:\n''').strip()
_SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 56
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
    '''Fast CPU tests for `VQDiffusionPipeline`, assembled from tiny dummy components.'''
    # NOTE(review): every method/property below shares one (obfuscated) name, so later
    # definitions shadow earlier ones and references such as `self.num_embed` or
    # `self.dummy_vqvae` cannot resolve as written — this looks like an automated
    # renaming artifact; confirm against the original test file.
    def _UpperCAmelCase (self ) -> List[Any]:
        '''Release Python and CUDA memory after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''VQ codebook size used by the dummy models (presumably `num_embed`).'''
        return 12
    @property
    def _UpperCAmelCase (self ) -> List[Any]:
        '''AdaNorm embedding count for the dummy transformer (presumably `num_embeds_ada_norm`).'''
        return 12
    @property
    def _UpperCAmelCase (self ) -> List[Any]:
        '''Hidden size of the dummy text encoder (presumably `text_embedder_hidden_size`).'''
        return 32
    @property
    def _UpperCAmelCase (self ) -> List[str]:
        '''Tiny VQ-VAE built with a fixed seed so outputs are reproducible.'''
        torch.manual_seed(0 )
        __lowercase = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
        return model
    @property
    def _UpperCAmelCase (self ) -> int:
        '''Tiny CLIP tokenizer from the hub test fixtures.'''
        __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    @property
    def _UpperCAmelCase (self ) -> Tuple:
        '''Tiny CLIP text encoder built with a fixed seed.'''
        torch.manual_seed(0 )
        __lowercase = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(_lowerCamelCase )
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Tiny discrete transformer (12x12 latent grid) built with a fixed seed.'''
        torch.manual_seed(0 )
        __lowercase = 12
        __lowercase = 12
        __lowercase = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        __lowercase = TransformeraDModel(**_lowerCamelCase )
        return model
    def _UpperCAmelCase (self ) -> Optional[Any]:
        '''End-to-end run without learnable classifier-free embeddings: checks output
        shape, a reference pixel slice, and that tuple output matches dict output.'''
        __lowercase = '''cpu'''
        __lowercase = self.dummy_vqvae
        __lowercase = self.dummy_text_encoder
        __lowercase = self.dummy_tokenizer
        __lowercase = self.dummy_transformer
        __lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
        __lowercase = VQDiffusionPipeline(
            vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
        __lowercase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = '''teddy bear playing in the pool'''
        # Same seed for both calls so the dict-style and tuple-style outputs must agree.
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def _UpperCAmelCase (self ) -> Any:
        '''Same end-to-end run but with learnable classifier-free sampling embeddings.'''
        __lowercase = '''cpu'''
        __lowercase = self.dummy_vqvae
        __lowercase = self.dummy_text_encoder
        __lowercase = self.dummy_tokenizer
        __lowercase = self.dummy_transformer
        __lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(
            learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
        __lowercase = VQDiffusionPipeline(
            vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
        __lowercase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
        # NOTE(review): a tolerance of 2.0 on values in ~[0, 1] makes this assertion
        # vacuous — presumably relaxed for flakiness; confirm the intended tolerance.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    '''Slow GPU integration test for the released `microsoft/vq-diffusion-ithq` pipeline.'''
    def _UpperCAmelCase (self ) -> Dict:
        '''Release Python and CUDA memory after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase (self ) -> str:
        '''Generate one image with classifier-free sampling and compare to a stored reference.'''
        __lowercase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        __lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        __lowercase = pipeline.to(_lowerCamelCase )
        pipeline.set_progress_bar_config(disable=_lowerCamelCase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipeline(
            '''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
        __lowercase = output.images[0]
        assert image.shape == (256, 256, 3)
        # NOTE(review): max-abs tolerance of 2.0 on image data in [0, 1] is effectively
        # a smoke test only — confirm the intended tolerance upstream.
        assert np.abs(expected_image - image ).max() < 2.0
| 56
| 1
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
    '''Unit tests for `PriorTransformer`: dummy-input shapes, forward signature,
    hub loading, and a seeded output-slice regression check.'''
    # NOTE(review): methods/properties below share one (obfuscated) name, so later
    # definitions shadow earlier ones, and several bodies read local names that no
    # longer match their assignments — automated-renaming artifacts; code kept as-is.
    a : str = PriorTransformer
    a : Optional[Any] = "hidden_states"
    @property
    def _UpperCAmelCase (self ) -> int:
        '''Random dummy inputs matching the dummy model's expected shapes.'''
        __lowercase = 4
        __lowercase = 8
        __lowercase = 7
        __lowercase = floats_tensor((batch_size, embedding_dim) ).to(_lowerCamelCase )
        __lowercase = floats_tensor((batch_size, embedding_dim) ).to(_lowerCamelCase )
        __lowercase = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def _UpperCAmelCase (self ,_lowerCamelCase=0 ) -> Optional[int]:
        '''Seeded dummy inputs so outputs are reproducible across runs.'''
        torch.manual_seed(_lowerCamelCase )
        __lowercase = 4
        __lowercase = 8
        __lowercase = 7
        __lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
        __lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
        __lowercase = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        '''Expected (batch, embedding_dim) input shape for the common tests.'''
        return (4, 8)
    @property
    def _UpperCAmelCase (self ) -> int:
        '''Expected (batch, embedding_dim) output shape for the common tests.'''
        return (4, 8)
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        '''Init kwargs + dummy inputs consumed by the shared ModelTesterMixin tests.'''
        __lowercase = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 4,
            '''num_layers''': 2,
            '''embedding_dim''': 8,
            '''num_embeddings''': 7,
            '''additional_embeddings''': 4,
        }
        __lowercase = self.dummy_input
        return init_dict, inputs_dict
    def _UpperCAmelCase (self ) -> Optional[int]:
        '''from_pretrained on the hub dummy loads with no missing keys and produces output.'''
        __lowercase , __lowercase = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' ,output_loading_info=_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
        model.to(_lowerCamelCase )
        __lowercase = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def _UpperCAmelCase (self ) -> str:
        '''forward() signature must start with (hidden_states, timestep).'''
        __lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
        __lowercase = self.model_class(**_lowerCamelCase )
        __lowercase = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __lowercase = [*signature.parameters.keys()]
        __lowercase = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> int:
        '''Seeded output of the hub dummy model matches a stored reference slice.'''
        __lowercase = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        __lowercase = model.to(_lowerCamelCase )
        if hasattr(_lowerCamelCase ,'''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        __lowercase = self.get_dummy_seed_input()
        with torch.no_grad():
            __lowercase = model(**_lowerCamelCase )[0]
        __lowercase = output[0, :5].flatten().cpu()
        print(_lowerCamelCase )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        __lowercase = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
        self.assertTrue(torch_all_close(_lowerCamelCase ,_lowerCamelCase ,rtol=1E-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
    '''Slow integration tests for the released Kandinsky 2.1 prior transformer.'''
    def _UpperCAmelCase (self ,_lowerCamelCase=1 ,_lowerCamelCase=768 ,_lowerCamelCase=77 ,_lowerCamelCase=0 ) -> Optional[Any]:
        '''Seeded random inputs sized (batch, embedding_dim[, num_embeddings]).'''
        # NOTE(review): all four parameters share one (obfuscated) name while the body
        # reads `batch_size`/`embedding_dim`/`num_embeddings` — renaming artifact.
        torch.manual_seed(_lowerCamelCase )
        __lowercase = batch_size
        __lowercase = embedding_dim
        __lowercase = num_embeddings
        __lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
        __lowercase = torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
        __lowercase = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def _UpperCAmelCase (self ) -> Optional[Any]:
        '''Release Python and CUDA memory after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
            [37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
            # fmt: on
        ] )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
        '''For each seed, the model output must match the stored reference slice.'''
        __lowercase = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' ,subfolder='''prior''' )
        model.to(_lowerCamelCase )
        __lowercase = self.get_dummy_seed_input(seed=_lowerCamelCase )
        with torch.no_grad():
            __lowercase = model(**_lowerCamelCase )[0]
        assert list(sample.shape ) == [1, 768]
        __lowercase = sample[0, :8].flatten().cpu()
        print(_lowerCamelCase )
        __lowercase = torch.tensor(_lowerCamelCase )
        assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 )
| 56
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
    '''SentencePiece-style Unigram tokenizer built on the `tokenizers` library.'''
    # NOTE(review): parameters in the methods below share one (obfuscated) name and
    # several assignments lost their original targets — automated-renaming artifacts;
    # code kept byte-identical.
    def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
        '''Assemble normalizer, pre-tokenizer, decoder and post-processor around a
        Unigram model with pad/eos/unk special tokens at ids 0/1/2.'''
        __lowercase = {
            '''pad''': {'''id''': 0, '''token''': pad_token},
            '''eos''': {'''id''': 1, '''token''': eos_token},
            '''unk''': {'''id''': 2, '''token''': unk_token},
        }
        __lowercase = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            __lowercase = token_dict['''token''']
        __lowercase = Tokenizer(Unigram() )
        # NFKC-normalize, collapse repeated spaces, lowercase.
        __lowercase = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
                normalizers.Lowercase(),
            ] )
        # Metaspace word-boundary marking, per-digit splitting, punctuation splitting.
        __lowercase = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
                pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
                pre_tokenizers.Punctuation(),
            ] )
        __lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
        # Append the EOS token to every encoded sequence.
        __lowercase = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
        __lowercase = {
            '''model''': '''SentencePieceUnigram''',
            '''replacement''': replacement,
            '''add_prefix_space''': add_prefix_space,
        }
        super().__init__(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
        '''Train the Unigram model from one or more text files, then patch in the unk id.'''
        __lowercase = trainers.UnigramTrainer(
            vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
        # A single path is wrapped into a list so train() always receives a sequence.
        if isinstance(_lowerCamelCase ,_lowerCamelCase ):
            __lowercase = [files]
        self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
        self.add_unk_id()
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
        '''Train the Unigram model from an iterator of text batches, then patch in the unk id.'''
        __lowercase = trainers.UnigramTrainer(
            vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
        self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
        self.add_unk_id()
    def _UpperCAmelCase (self ) -> Tuple:
        '''Round-trip the tokenizer through JSON so the Unigram model records its unk id.'''
        __lowercase = json.loads(self._tokenizer.to_str() )
        __lowercase = self.special_tokens['''unk''']['''id''']
        __lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: submodule name -> public symbols it provides.
# Fix: the lazy registrations below previously overwrote one module-level name
# and the final _LazyModule call referenced an undefined `_import_structure`.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# Modeling objects are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime attribute access is lazy.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import structure: submodule name -> public symbols it provides.
# Fixes: the optional registrations previously overwrote one module-level name
# instead of adding dict entries; the final _LazyModule call referenced an
# undefined `_import_structure` and lacked module_spec (sibling files pass it).
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime attribute access is lazy.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

# Lazy import structure: submodule name -> public symbols it provides.
# Fix: the optional registrations previously overwrote one module-level name
# instead of adding dict entries, and the final _LazyModule call referenced an
# undefined `_import_structure`.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime attribute access is lazy.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
    '''Builds and posts the Slack report for a doctest CI run.'''
    # NOTE(review): most methods below share one (obfuscated) name, and many bodies
    # read local names (`title`, `time_spent`, `blocks`, ...) that no longer match
    # their assignments — automated-renaming artifacts; logic kept byte-identical.
    # Also relies on module-level globals `doc_test_results` and `client`.
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
        '''Record the report title and unpack counts/failures from `doc_test_results`.'''
        __lowercase = title
        __lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
        __lowercase = doc_test_results['''success''']
        __lowercase = doc_test_results['''failures''']
        __lowercase = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        __lowercase = doc_test_results
    @property
    def _UpperCAmelCase (self ) -> str:
        '''Aggregate the per-suite runtimes into a single "XhYmZs" string.'''
        __lowercase = [self._time_spent]
        __lowercase = 0
        for time in time_spent:
            __lowercase = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_lowerCamelCase ) == 1:
                __lowercase = [0, 0, time_parts[0]]
            __lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        __lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Slack header block carrying the report title.'''
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Slack section shown when every test passed.'''
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Slack section summarizing how many tests failed.'''
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Slack section listing the failing examples per category (from the global results).'''
        __lowercase = 40
        __lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
        __lowercase = ''''''
        for category, failures in category_failures.items():
            if len(_lowerCamelCase ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_lowerCamelCase )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def _UpperCAmelCase (self ) -> str:
        '''Full JSON payload: header plus failure or no-failure sections.'''
        __lowercase = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_lowerCamelCase )
    @staticmethod
    def _UpperCAmelCase () -> List[str]:
        '''Post a generic "something went wrong" message to the Slack channel.'''
        __lowercase = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
    def _UpperCAmelCase (self ) -> Tuple:
        '''Post the main report message to the Slack channel.'''
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        # Fallback text shown in notifications that cannot render rich blocks.
        __lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
        __lowercase = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        '''Build the Slack blocks for a threaded reply describing one job's failures.'''
        __lowercase = ''''''
        for key, value in failures.items():
            # Truncate long tracebacks so the message stays within Slack limits.
            __lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        __lowercase = job_name
        __lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            __lowercase = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def _UpperCAmelCase (self ) -> Any:
        '''Post one threaded reply per job containing its failure details.'''
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        __lowercase = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        __lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                __lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
                __lowercase = job_result['''failures''']
                __lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
                # Throttle to stay under Slack's rate limit.
                time.sleep(1 )
def _lowerCAmelCase():
    """Fetch {job name: job URL} for the current GitHub Actions run.

    Reads the run id from the GITHUB_RUN_ID environment variable and pages
    through the GitHub API 100 jobs at a time.

    Returns:
        Dict of job name -> html_url, or {} if anything goes wrong.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first request already returned 100 jobs; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def _lowerCAmelCase(lowerCamelCase_: str):
    """Read every file in directory `lowerCamelCase_` and return {stem: content}.

    Returns an empty dict if the directory does not exist.

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    artifact_path = lowerCamelCase_
    _artifact = {}
    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file), encoding="utf-8") as f:
                    # Key each entry by the file name without its extension.
                    # (The original read the file but never stored the content.)
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e
    return _artifact
def _lowerCAmelCase():
    """Scan the current working directory and return {dir name: Artifact}.

    Every sub-directory of the CWD is treated as one downloaded CI artifact.
    """

    class Artifact:
        """A named artifact together with the paths it was found at."""

        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): the helper callables below (get_job_links,
    # retrieve_available_artifacts, retrieve_artifact, handle_test_results,
    # extract_first_line_failure, Message) are defined under mangled
    # identifiers elsewhere in this file — verify the intended names.
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job (popped again in Message.post_reply).
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        # These keys mirror the pops performed in post_reply — verify.
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                # Bucket the failed test into its doc-test category.
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 56
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL for ViViT.
# NOTE(review): this rebinds _SCREAMING_SNAKE_CASE and clobbers the logger
# bound just above — the two were presumably distinct names originally; verify.
_SCREAMING_SNAKE_CASE = {
    '''google/vivit-b-16x2-kinetics400''': (
        '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __lowercase(PretrainedConfig):
    """Configuration for a ViViT (Video Vision Transformer) model.

    Defaults reproduce the vivit-b-16x2-kinetics400 architecture.
    """

    # `model_type` identifier used by the auto classes.
    a: str = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=None,
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        """Store the architecture hyper-parameters and forward the rest to the base config.

        The original declared every parameter with the same (duplicate) name,
        which is a SyntaxError; names restored from the assignment order.
        """
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        # Avoid a shared mutable default: None means the standard 2x16x16 tubelet.
        self.tubelet_size = [2, 16, 16] if tubelet_size is None else tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 56
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase():
    """Entry point of the diffusers CLI: parse the sub-command and run it."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No sub-command given: show the help text and bail out.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # The CLI entry point above is (mangled-)named `_lowerCAmelCase`;
    # the original called an undefined `main()`.
    _lowerCAmelCase()
| 56
| 1
|
"""Lazy import structure for the TAPAS model (PyTorch and TensorFlow variants)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Config and tokenizer are always importable; model classes are added below
# only when their backend is available.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported when actually accessed. (The original assigned the proxy to a
    # throwaway name and never defined `_import_structure`.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
import math
def _lowerCAmelCase(lowerCamelCase_: int):
    """Return True if the given non-negative int is prime (trial division).

    Raises:
        AssertionError: if the input is not a non-negative int.
    """
    number = lowerCamelCase_
    # The original checked isinstance(number, number); the second argument
    # must be a type.
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def _lowerCAmelCase(value, factor=1, **kwargs):
    """Return the nearest prime to `factor * value`.

    Counts upward by default, or downward when `desc=True` is passed. If the
    starting value is already prime, the search restarts from value + 1 so a
    different prime is returned.

    The original declared all three parameters with the same name (a
    SyntaxError) and called undefined siblings; a local primality helper makes
    the block self-contained.
    """

    def _is_prime(number):
        # Trial division by odd numbers up to sqrt(number).
        if 1 < number < 4:
            return True
        if number < 2 or not number % 2:
            return False
        return not any(not number % i for i in range(3, int(math.sqrt(number) + 1), 2))

    value = factor * value
    first_value_val = value
    while not _is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        # Already prime at the start: search again past it.
        return _lowerCAmelCase(value + 1, **kwargs)
    return value
| 56
| 1
|
'''simple docstring'''
import math
import unittest
def _lowerCAmelCase(lowerCamelCase_: int):
    """Return True if the given non-negative int is prime (6k±1 optimization).

    Raises:
        AssertionError: if the input is not a non-negative int.
    """
    number = lowerCamelCase_
    # The original checked isinstance(number, number); the second argument
    # must be a type.
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class __lowercase(unittest.TestCase):
    """Unit tests for the primality check defined above as `_lowerCAmelCase`.

    Fixes: methods renamed to `test_*` so unittest actually discovers them
    (they previously shared one name and shadowed each other), the undefined
    `is_prime`/`_lowerCamelCase` references resolved, and the broken
    `Dict`/`int` return annotations (NameError at class creation) removed.
    """

    def test_primes(self):
        """Known small primes must be reported as prime."""
        for prime in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            self.assertTrue(_lowerCAmelCase(prime))

    def test_not_primes(self):
        """Invalid inputs raise; composites and edge values are rejected."""
        with self.assertRaises(AssertionError):
            _lowerCAmelCase(-19)
        self.assertFalse(
            _lowerCAmelCase(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            _lowerCAmelCase(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(_lowerCAmelCase(2 * 2))
        self.assertFalse(_lowerCAmelCase(2 * 3))
        self.assertFalse(_lowerCAmelCase(3 * 3))
        self.assertFalse(_lowerCAmelCase(3 * 5))
        self.assertFalse(_lowerCAmelCase(3 * 5 * 7))
if __name__ == "__main__":
    # Run the unittest runner when executed as a script.
    unittest.main()
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase(arr, low, high):
    """Divide-and-conquer maximum subarray.

    Args:
        arr: Sequence of numbers.
        low / high: Inclusive index bounds of the search range.

    Returns:
        (start_index, end_index, best_sum) of the maximum subarray, or
        (None, None, 0) for an empty array.

    The original recursed via undefined names (`max_subarray`,
    `max_cross_sum`) and declared duplicate parameter names; nested helpers
    make the block self-contained.
    """

    def _cross_sum(lo, mid, hi):
        # Best sum of a subarray forced to cross `mid` (uses both halves).
        best_left, left_idx = float("-inf"), -1
        best_right, right_idx = float("-inf"), -1
        running = 0
        for i in range(mid, lo - 1, -1):
            running += arr[i]
            if running > best_left:
                best_left, left_idx = running, i
        running = 0
        for i in range(mid + 1, hi + 1):
            running += arr[i]
            if running > best_right:
                best_right, right_idx = running, i
        return left_idx, right_idx, best_left + best_right

    def _solve(lo, hi):
        if lo == hi:
            return lo, hi, arr[lo]
        mid = (lo + hi) // 2
        left_low, left_high, left_sum = _solve(lo, mid)
        right_low, right_high, right_sum = _solve(mid + 1, hi)
        cross_left, cross_right, cross_sum = _cross_sum(lo, mid, hi)
        if left_sum >= right_sum and left_sum >= cross_sum:
            return left_low, left_high, left_sum
        if right_sum >= left_sum and right_sum >= cross_sum:
            return right_low, right_high, right_sum
        return cross_left, cross_right, cross_sum

    if not arr:
        return None, None, 0
    return _solve(low, high)
def _lowerCAmelCase(arr, low, mid, high):
    """Best sum of a subarray of arr[low..high] that crosses the midpoint.

    Returns:
        (left_index, right_index, total) where the winning subarray is
        arr[left_index..right_index] with left_index <= mid < right_index.

    The original declared all four parameters with the same name (a
    SyntaxError), leaving `low`/`mid`/`high` unbound in the body.
    """
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ = 0
    # Grow leftward from mid, remembering the best suffix ending at mid.
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    # Grow rightward from mid + 1, remembering the best prefix.
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase(lowerCamelCase_: int):
    """Time one max-subarray run on a random array of length `lowerCamelCase_`.

    NOTE(review): as written this raises NameError — `max_subarray`,
    `input_size`, `end` and `start` are not bound under these mangled names
    (the array and both timestamps are all assigned to `__lowercase`);
    verify the intended variable wiring before use.
    """
    __lowercase = [randint(1, lowerCamelCase_) for _ in range(lowerCamelCase_)]
    __lowercase = time.time()
    max_subarray(lowerCamelCase_, 0, input_size - 1)
    __lowercase = time.time()
    return end - start
def _lowerCAmelCase():
    """Benchmark the max-subarray routine over growing input sizes and plot it.

    NOTE(review): as written this raises NameError — `time_max_subarray`,
    `input_sizes` and the runtime list are not bound under these mangled
    names, and the print/plot calls reference the undefined `lowerCamelCase_`;
    verify the intended variable wiring before use.
    """
    __lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
    __lowercase = [time_max_subarray(lowerCamelCase_) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''')
    for input_size, runtime in zip(lowerCamelCase_, lowerCamelCase_):
        print(lowerCamelCase_, '''\t\t''', lowerCamelCase_)
    plt.plot(lowerCamelCase_, lowerCamelCase_)
    plt.xlabel('''Number of Inputs''')
    plt.ylabel('''Time taken in seconds''')
    plt.show()
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    from doctest import testmod

    testmod()
| 56
| 1
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : bool = True , lowerCamelCase_ : float = math.inf , lowerCamelCase_ : float = -math.inf , lowerCamelCase_ : float = math.inf , lowerCamelCase_ : float = -math.inf , lowerCamelCase_ : bool = False , lowerCamelCase_ : float = 1_0_0 , lowerCamelCase_ : float = 0.01 , lowerCamelCase_ : float = 1 , ):
__lowercase = False
__lowercase = search_prob
__lowercase = start_temperate
__lowercase = []
__lowercase = 0
__lowercase = None
while not search_end:
__lowercase = current_state.score()
if best_state is None or current_score > best_state.score():
__lowercase = current_state
scores.append(lowerCamelCase_ )
iterations += 1
__lowercase = None
__lowercase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__lowercase = random.randint(0 , len(lowerCamelCase_ ) - 1 ) # picking a random neighbor
__lowercase = neighbors.pop(lowerCamelCase_ )
__lowercase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__lowercase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__lowercase = picked_neighbor
else:
__lowercase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__lowercase = picked_neighbor
__lowercase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__lowercase = True
else:
__lowercase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCamelCase_ ) , lowerCamelCase_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
    # NOTE(review): the annealing routine above is (mangled-)named
    # `_lowerCAmelCase`; the original called undefined `simulated_annealing`
    # and `test_fa` names. Both demo objectives keep the name `test_fa`,
    # the second definition deliberately shadowing the first, matching the
    # sequential call sites.

    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = _lowerCAmelCase(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = _lowerCAmelCase(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = _lowerCAmelCase(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = _lowerCAmelCase(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
| 56
|
"""Lazy import structure for the CLIPSeg model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Config and processor are always importable; model classes are added below
# only when torch is available.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch is only imported when a
    # model class is actually accessed. (The original assigned the proxy to a
    # throwaway name and never defined `_import_structure`.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Thirteen placeholder classes that raise a helpful ImportError via
# `requires_backends` when the `flax` backend is missing. Fixes applied to
# every class: the metaclass is `DummyObject` (imported above; the original
# referenced an undefined `lowerCAmelCase__`), and the duplicate
# `*_lowerCamelCase, **_lowerCamelCase` parameter pairs — a SyntaxError — are
# replaced with `*args, **kwargs`. The two classmethods keep their (mangled)
# shared name, so the second shadows the first, exactly as in the original.
class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Placeholder raising an ImportError unless `flax` is installed."""

    a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def _UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase(self):
    """Create a temp dir with a tiny vocab and feature-extractor config.

    Sets the instance attributes (tmpdirname, vocab_file,
    feature_extraction_file, add_kwargs_tokens_map, decoder_name) that the
    other test methods read — the original assigned every one of them to a
    throwaway local, leaving those reads unbound.
    """
    vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    # Extra tokenizer kwargs merged in by the tokenizer factory method.
    self.add_kwargs_tokens_map = {
        "unk_token": "<unk>",
        "bos_token": "<s>",
        "eos_token": "</s>",
    }
    feature_extractor_map = {
        "feature_size": 1,
        "padding_value": 0.0,
        "sampling_rate": 16000,
        "return_attention_mask": False,
        "do_normalize": True,
    }
    self.tmpdirname = tempfile.mkdtemp()
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    # The original joined against an undefined `_lowerCamelCase`; the
    # feature-extractor config file name constant is imported at module level.
    self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
    with open(self.vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens) + "\n")
    with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(feature_extractor_map) + "\n")
    # load decoder from hub
    self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
def _UpperCAmelCase(self, **_lowerCamelCase):
    """Build a CTC tokenizer from the temp-dir vocab, merging default special tokens.

    NOTE(review): `kwargs` is not bound under these mangled names (the merged
    map is assigned to `__lowercase`), so this raises NameError as written —
    verify the intended variable wiring.
    """
    __lowercase = self.add_kwargs_tokens_map.copy()
    kwargs.update(_lowerCamelCase)
    return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase)
def _UpperCAmelCase(self, **_lowerCamelCase):
    """Load a Wav2Vec2 feature extractor from the temp dir created in setUp."""
    return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **_lowerCamelCase)
def _UpperCAmelCase(self, **_lowerCamelCase):
    """Download and load the beam-search decoder fixture from the hub."""
    return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **_lowerCamelCase)
def _UpperCAmelCase(self):
    """Remove the temp dir created in setUp."""
    shutil.rmtree(self.tmpdirname)
def _UpperCAmelCase(self):
    """Round-trip a processor through save_pretrained/from_pretrained and compare parts.

    NOTE(review): every component is assigned to the throwaway `__lowercase`
    while the assertions read `tokenizer`/`feature_extractor`/`decoder`/
    `processor` and pass the undefined `_lowerCamelCase` as the expected
    type — NameError as written; verify the intended variable wiring.
    """
    __lowercase = self.get_tokenizer()
    __lowercase = self.get_feature_extractor()
    __lowercase = self.get_decoder()
    __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase, feature_extractor=_lowerCamelCase, decoder=_lowerCamelCase)
    processor.save_pretrained(self.tmpdirname)
    __lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
    # tokenizer
    self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    self.assertIsInstance(processor.tokenizer, _lowerCamelCase)
    # feature extractor
    self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
    self.assertIsInstance(processor.feature_extractor, _lowerCamelCase)
    # decoder
    self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
    self.assertEqual(
        processor.decoder.model_container[decoder._model_key]._unigram_set,
        decoder.model_container[decoder._model_key]._unigram_set,
    )
    self.assertIsInstance(processor.decoder, _lowerCamelCase)
def _UpperCAmelCase(self):
    """from_pretrained must apply the decoder kwargs (alpha/beta/boundary/unk offset).

    NOTE(review): both processors are assigned to the throwaway `__lowercase`
    while the assertions read `processor` — NameError as written; verify.
    """
    __lowercase = WavaVecaProcessorWithLM(
        tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
    processor.save_pretrained(self.tmpdirname)
    # make sure that error is thrown when decoder alphabet doesn't match
    __lowercase = WavaVecaProcessorWithLM.from_pretrained(
        self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)
    # decoder
    self.assertEqual(processor.language_model.alpha, 5.0)
    self.assertEqual(processor.language_model.beta, 3.0)
    self.assertEqual(processor.language_model.score_boundary, -7.0)
    self.assertEqual(processor.language_model.unk_score_offset, 3)
def _UpperCAmelCase(self):
    """Constructing the processor with a tokenizer token unknown to the decoder must raise.

    NOTE(review): the tokenizer is assigned to the throwaway `__lowercase`
    while `tokenizer`/`_lowerCamelCase` are read below — NameError as
    written; the expected exception type was also lost in mangling. Verify.
    """
    __lowercase = self.get_tokenizer()
    # add token to trigger raise
    tokenizer.add_tokens(['''xx'''])
    with self.assertRaisesRegex(_lowerCamelCase, '''include'''):
        WavaVecaProcessorWithLM(
            tokenizer=_lowerCamelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
def _UpperCAmelCase(self):
    """Audio through the processor must match the bare feature extractor.

    NOTE(review): all components and both encodings are assigned to the
    throwaway `__lowercase` while `input_feat_extract`/`input_processor` are
    read in the loop — NameError as written; verify the intended wiring.
    """
    __lowercase = self.get_feature_extractor()
    __lowercase = self.get_tokenizer()
    __lowercase = self.get_decoder()
    __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase, feature_extractor=_lowerCamelCase, decoder=_lowerCamelCase)
    __lowercase = floats_list((3, 1000))
    __lowercase = feature_extractor(_lowerCamelCase, return_tensors='''np''')
    __lowercase = processor(_lowerCamelCase, return_tensors='''np''')
    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def _UpperCAmelCase(self):
    """Text through the processor must match the bare tokenizer.

    NOTE(review): components and both encodings are assigned to the throwaway
    `__lowercase` while `encoded_tok`/`encoded_processor` are read in the
    loop — NameError as written; verify the intended wiring.
    """
    __lowercase = self.get_feature_extractor()
    __lowercase = self.get_tokenizer()
    __lowercase = self.get_decoder()
    __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase, feature_extractor=_lowerCamelCase, decoder=_lowerCamelCase)
    __lowercase = '''This is a test string'''
    __lowercase = processor(text=_lowerCamelCase)
    __lowercase = tokenizer(_lowerCamelCase)
    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase(self):
    """decode() on one logit matrix must agree with the raw pyctcdecode beams.

    NOTE(review): components and results are assigned to the throwaway
    `__lowercase` while `processor`/`decoder`/`decoded_processor`/
    `decoded_decoder` are read below — NameError as written; verify wiring.
    """
    __lowercase = self.get_feature_extractor()
    __lowercase = self.get_tokenizer()
    __lowercase = self.get_decoder()
    __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase, feature_extractor=_lowerCamelCase, decoder=_lowerCamelCase)
    __lowercase = self._get_dummy_logits(shape=(10, 16), seed=13)
    __lowercase = processor.decode(_lowerCamelCase)
    __lowercase = decoder.decode_beams(_lowerCamelCase)[0]
    self.assertEqual(decoded_decoder[0], decoded_processor.text)
    self.assertEqual('''</s> <s> </s>''', decoded_processor.text)
    self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
    self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['''fork'''], ['''spawn''']])
def _UpperCAmelCase(self, _lowerCamelCase):
    """batch_decode (optionally via a multiprocessing pool) must match raw beams.

    Parameterized over pool start methods: None (no pool), fork, spawn.
    NOTE(review): `pool_context`, `processor`, `decoder`, `logits`,
    `decoded_processor`, `decoded_beams` and the three accumulator lists are
    not bound under these mangled names — NameError as written; verify the
    intended variable wiring.
    """
    __lowercase = self.get_feature_extractor()
    __lowercase = self.get_tokenizer()
    __lowercase = self.get_decoder()
    __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase, feature_extractor=_lowerCamelCase, decoder=_lowerCamelCase)
    __lowercase = self._get_dummy_logits()
    # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
    # otherwise, the LM won't be available to the pool's sub-processes.
    # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
    if pool_context is None:
        __lowercase = processor.batch_decode(_lowerCamelCase)
    else:
        with get_context(_lowerCamelCase).Pool() as pool:
            __lowercase = processor.batch_decode(_lowerCamelCase, _lowerCamelCase)
    __lowercase = list(_lowerCamelCase)
    with get_context('''fork''').Pool() as p:
        __lowercase = decoder.decode_beams_batch(_lowerCamelCase, _lowerCamelCase)
    __lowercase, __lowercase, __lowercase = [], [], []
    for beams in decoded_beams:
        texts_decoder.append(beams[0][0])
        logit_scores_decoder.append(beams[0][-2])
        lm_scores_decoder.append(beams[0][-1])
    self.assertListEqual(_lowerCamelCase, decoded_processor.text)
    self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text)
    self.assertListEqual(_lowerCamelCase, decoded_processor.logit_score)
    self.assertListEqual(_lowerCamelCase, decoded_processor.lm_score)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _lowerCAmelCase ( lowerCamelCase_ : list ):
if not postfix_notation:
return 0
__lowercase = {'''+''', '''-''', '''*''', '''/'''}
__lowercase = []
for token in postfix_notation:
if token in operations:
__lowercase , __lowercase = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowerCamelCase_ ) )
return stack.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 56
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Image processor that resizes images to a multiple of `size_divisor` and rescales to [0, 1].

    NOTE(review): the obfuscated original named all three methods `_UpperCAmelCase`
    (so `resize`/`rescale` were shadowed and `self.resize`/`self.rescale` raised
    AttributeError) and reused `_lowerCamelCase` for several parameters of `resize`
    (a SyntaxError). Names restored from the in-class call sites.
    """

    # Name of the pixel input expected by the model.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        """Store defaults; `size_divisor` is the multiple both image sides are rounded down to."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image` so height and width are the closest lower multiples of `size_divisor`."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # module-level `resize` from image_transforms, not this method
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline (resize -> rescale -> channel format) on one or more images.

        Raises ValueError when resizing is requested without a `size_divisor`,
        or when `images` is not a valid image/list of images.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 56
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds tiny BertGeneration configs/inputs and runs shape checks for the tests below.

    NOTE(review): class renamed from `__lowercase` — it is instantiated as
    `BertGenerationEncoderTester(self)` further down the file. The obfuscated
    `__init__` reused `_lowerCamelCase` for every parameter (a SyntaxError);
    parameter names restored from the attribute-assignment order.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None  # guard against use_labels=False leaving the name unbound
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Tiny encoder config (is_decoder=False)."""
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs, plus encoder states for cross-attention."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        """Forward the encoder with and without a mask; check the output shape."""
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        """Forward the encoder in decoder mode with cross-attention inputs."""
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        """Check that cached (past_key_values) decoding matches full-sequence decoding."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        """Forward the decoder LM head with labels; check the logits shape."""
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) interface."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for BertGeneration.

    NOTE(review): the obfuscated original inherited from the undefined name
    `lowerCAmelCase__` and named every test `_UpperCAmelCase` (so unittest never
    discovered them and later defs shadowed earlier ones); mixins restored from
    this file's imports and test names from the helpers each method calls.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        # same checks but with the generic "bert" model_type on the config
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow integration check against the pretrained BBC encoder checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        # reference values recorded from the released checkpoint
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow integration check for the decoder LM head on the same checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        # reference values recorded from the released checkpoint
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): the obfuscated original bound all four constants to one name
# (`_SCREAMING_SNAKE_CASE`), while the classes below read HEURISTIC, grid and
# delta — names restored from those references.
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# (y, x) position alias used in the annotations below
TPosition = tuple[int, int]
class Node:
    """A* search node holding position, goal, path cost g and heuristic h.

    NOTE(review): class renamed from `__lowercase` — `AStar.__init__` and
    `get_successors` below construct it via the name `Node`; the obfuscated
    `__init__` reused one parameter name six times (a SyntaxError).
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, otherwise Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # ordering by total cost lets open lists be sorted directly
        return self.f_cost < other.f_cost
class AStar:
    """Single-direction A* over the module-level `grid`.

    NOTE(review): class renamed from `__lowercase` — `BidirectionalAStar` below
    constructs it via the name `AStar`; method names restored from the internal
    `self.retrace_path` / `self.get_successors` calls.
    """

    def __init__(self, start, goal) -> None:
        # start/goal arrive as (y, x); Node takes (x, y, goal_x, goal_y, g, parent)
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path start->target, or [start.pos] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of `parent` (cost +1 each)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> list[TPosition]:
        """Walk parent links back from `node` and return the (y, x) path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar searches grown toward each other's frontier.

    NOTE(review): class renamed from `__lowercase` (the __main__ block refers to
    `BidirectionalAStar`); the retargeting assignments inside `search` were
    destroyed by the obfuscation (`__lowercase = ...`) and are restored here.
    """

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand both searches until the frontiers meet; fall back to [start.pos]."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search now aims for the other's current frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> list[TPosition]:
        """Join the forward path with the reversed backward path (meeting node once)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the obfuscated version timed the bidirectional search without
    # ever running it; the search call is restored so the timing is meaningful.
    bd_path = bd_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 56
| 1
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emits a deprecation warning at import time, steering users of the old
# `diffusers.pipeline_utils` module toward `diffusers.pipelines.pipeline_utils`.
deprecate(
    '''pipelines_utils''',
    '''0.22.0''',
    '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 56
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
# Module-level logger; the mangled source bound this to a throwaway name
# that is clobbered by later assignments.
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSatForSequenceClassification model and copy the s3prl
    downstream head weights into it.

    NOTE(review): restored from a mangled source that bound every value to
    one throwaway name and returned an undefined `model`; attribute targets
    follow the upstream s3prl conversion script.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSatForAudioFrameClassification model and copy the s3prl
    diarization head weights into it.

    NOTE(review): restored from a mangled source that returned an undefined
    `model`; attribute targets follow the upstream s3prl conversion script.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSatForXVector model and copy the s3prl x-vector head
    weights (TDNN stack, pooling linears, AM-softmax objective) into it.

    NOTE(review): restored from a mangled source that returned an undefined
    `model`; attribute targets follow the upstream s3prl conversion script.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl UniSpeechSat downstream checkpoint to HF format and
    save model + feature extractor to `model_dump_path`.

    NOTE(review): the mangled signature repeated one parameter name (a
    SyntaxError) and the body referenced names it never bound; names restored
    to match the existing `__main__` call in this file.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    # Dispatch on the architecture declared in the config.
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # Build the CLI; the mangled source never bound `parser`/`args`, so every
    # line below raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 1
|
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional-knapsack solver.

    Repeatedly takes the item with the best profit/weight ratio; the last item
    may be taken fractionally to fill the remaining capacity.

    Args:
        profit: profit of each item (non-negative).
        weight: weight of each item (non-negative), same length as `profit`.
        max_weight: knapsack capacity (> 0).

    Returns:
        The maximum achievable profit (int or float).

    Raises:
        ValueError: on mismatched lengths, non-positive capacity, or any
            negative profit/weight.

    NOTE(review): restored from a mangled source whose signature repeated one
    parameter name (a SyntaxError) and whose body never bound the names it read.
    """
    if len(profit) != len(weight):
        raise ValueError('''The length of profit and weight must be same.''')
    if max_weight <= 0:
        raise ValueError('''max_weight must greater than zero.''')
    if any(p < 0 for p in profit):
        raise ValueError('''Profit can not be negative.''')
    if any(w < 0 for w in weight):
        raise ValueError('''Weight can not be negative.''')
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # Mark the chosen ratio as consumed so .index() finds the next copy.
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        '''Input profits, weights, and then max_weight (all positive ints) separated by '''
        '''spaces.'''
    )

    # The mangled source bound all three inputs to one throwaway name, so the
    # call below raised NameError; restored the names the call expects.
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
    # Function Call
    calc_profit(profit, weight, max_weight)
| 56
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase(args: Namespace):
    """Factory for the `datasets-cli convert` sub-command.

    The mangled parameter was named `lowerCamelCase_` while the body read
    `args`, raising NameError on every call; the parameter name is restored.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class __lowercase ( lowerCAmelCase__ ):
    '''CLI command that converts TensorFlow Datasets scripts into HuggingFace
    Datasets scripts.

    NOTE(review): local names in this block look machine-mangled
    (`__lowercase`, `_lowerCamelCase`): several names read below
    (`train_parser`, `lines`, `match`, `is_builder`, ...) are never bound
    under those names, and `__init__` repeats one parameter name (a
    SyntaxError). Compare with the upstream `datasets` convert command
    before relying on this code.
    '''
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ) -> Any:
        '''Register the `convert` sub-command and its arguments on the parser.'''
        # NOTE(review): the sub-parser is bound to `__lowercase` but read as
        # `train_parser` below — presumably the same variable pre-mangling.
        __lowercase = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=_lowerCamelCase )
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
        '''Store the source TFDS path and the target datasets directory.'''
        # NOTE(review): targets were presumably `self._logger`,
        # `self._tfds_path`, `self._datasets_directory` before mangling —
        # those attributes are read later in the run method.
        __lowercase = get_logger('''datasets-cli/converting''' )
        __lowercase = tfds_path
        __lowercase = datasets_directory
    def _UpperCAmelCase (self ) -> str:
        '''Convert every TFDS script found at `self._tfds_path`.'''
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        __lowercase = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        # Single file or a whole directory of scripts.
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.listdir(_lowerCamelCase )
        else:
            __lowercase = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}" )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            # Skip non-Python, package-init and test files.
            if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
                __lowercase = f.readlines()
            __lowercase = []
            __lowercase = False
            __lowercase = False
            __lowercase = []
            for line in lines:
                __lowercase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __lowercase = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    __lowercase = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    __lowercase = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    __lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    __lowercase = True
                    # NOTE(review): the lambda parameter is mangled while the
                    # body reads free variable `e` — broken as written.
                    __lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
                    out_lines.append(_lowerCamelCase )
                    out_lines.append(_lowerCamelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        __lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    __lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    __lowercase = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __lowercase = True
                out_lines.append(_lowerCamelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __lowercase = f_name.replace('''.py''' ,'''''' )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(_lowerCamelCase )
            if needs_manual_update:
                with_manual_update.append(_lowerCamelCase )
            with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(_lowerCamelCase )
            self._logger.info(f"Converted in {output_file}" )
        # Move shared utility files next to the builder that imports them.
        for utils_file in utils_files:
            try:
                __lowercase = os.path.basename(_lowerCamelCase )
                __lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f"Moving {dest_folder} to {utils_file}" )
                shutil.copy(_lowerCamelCase ,_lowerCamelCase )
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 1
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Expected on-disk names of the tokenizer resource files; the class body
# below reads these module-level names, which the mangled source never bound.
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.txt''',
    '''merges_file''': '''bpe.codes''',
}

# Download URLs for the pretrained PhoBERT checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
    },
    '''merges_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
    },
}

# Maximum model input sizes per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''vinai/phobert-base''': 256,
    '''vinai/phobert-large''': 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is any sequence of symbols (string or tuple of string symbols, as
    used by the BPE routine below). Returns a set of 2-tuples; a single-symbol
    word yields the empty set.

    NOTE(review): restored from a mangled source that bound the accumulator to
    a throwaway name and then read an unbound `pairs` (NameError).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowercase ( lowerCAmelCase__ ):
    '''PhoBERT-style BPE tokenizer (fairseq vocab file + BPE merge codes).

    NOTE(review): this block looks machine-mangled: the three class attributes
    are all bound to `a` (only the last survives), most methods share the name
    `_UpperCAmelCase` (later defs shadow earlier ones), several signatures
    repeat a parameter name (a SyntaxError), and assignments that were
    presumably `self.<attr> = ...` are bound to `__lowercase`. Compare with
    the upstream PhoBERT tokenizer before relying on this code.
    '''
    a : Tuple = VOCAB_FILES_NAMES
    a : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase="<s>" ,_lowerCamelCase="</s>" ,_lowerCamelCase="</s>" ,_lowerCamelCase="<s>" ,_lowerCamelCase="<unk>" ,_lowerCamelCase="<pad>" ,_lowerCamelCase="<mask>" ,**_lowerCamelCase ,) -> Optional[int]:
        '''Load the vocabulary and the BPE merge ranks from disk.'''
        super().__init__(
            bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,unk_token=_lowerCamelCase ,sep_token=_lowerCamelCase ,cls_token=_lowerCamelCase ,pad_token=_lowerCamelCase ,mask_token=_lowerCamelCase ,**_lowerCamelCase ,)
        __lowercase = vocab_file
        __lowercase = merges_file
        # Ids 0-3 are reserved for the special tokens before the vocab file
        # is read in.
        __lowercase = {}
        __lowercase = 0
        __lowercase = 1
        __lowercase = 2
        __lowercase = 3
        self.add_from_file(_lowerCamelCase )
        __lowercase = {v: k for k, v in self.encoder.items()}
        with open(_lowerCamelCase ,encoding='''utf-8''' ) as merges_handle:
            __lowercase = merges_handle.read().split('''\n''' )[:-1]
        __lowercase = [tuple(merge.split()[:-1] ) for merge in merges]
        # Lower rank = higher merge priority.
        __lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
        __lowercase = {}
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> List[int]:
        '''Add special tokens: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        __lowercase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = False ) -> List[int]:
        '''Return a mask with 1 at special-token positions, 0 elsewhere.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase ,token_ids_a=_lowerCamelCase ,already_has_special_tokens=_lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(_lowerCamelCase )) + [1]
        return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> List[int]:
        '''Token-type ids are all zeros (RoBERTa-style: segments unused).'''
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def _UpperCAmelCase (self ) -> List[str]:
        '''Number of entries in the encoder (vocabulary size).'''
        return len(self.encoder )
    def _UpperCAmelCase (self ) -> Dict:
        '''Full vocab, including tokens added after loading.'''
        return dict(self.encoder ,**self.added_tokens_encoder )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
        '''Apply byte-pair-encoding merges to a single token (memoized in self.cache).'''
        if token in self.cache:
            return self.cache[token]
        __lowercase = tuple(_lowerCamelCase )
        # Append "</w>" so end-of-word merges differ from word-internal ones.
        __lowercase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        __lowercase = get_pairs(_lowerCamelCase )
        if not pairs:
            return token
        while True:
            # Greedily merge the pair with the best (lowest) rank.
            __lowercase = min(_lowerCamelCase ,key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase ,float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            __lowercase , __lowercase = bigram
            __lowercase = []
            __lowercase = 0
            while i < len(_lowerCamelCase ):
                try:
                    __lowercase = word.index(_lowerCamelCase ,_lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __lowercase = j
                if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __lowercase = tuple(_lowerCamelCase )
            __lowercase = new_word
            if len(_lowerCamelCase ) == 1:
                break
            else:
                __lowercase = get_pairs(_lowerCamelCase )
        __lowercase = '''@@ '''.join(_lowerCamelCase )
        # Strip the trailing "</w>" marker (4 characters).
        __lowercase = word[:-4]
        __lowercase = word
        return word
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
        '''Split text on whitespace, then BPE-encode each token.'''
        __lowercase = []
        __lowercase = re.findall(R'''\S+\n?''' ,_lowerCamelCase )
        for token in words:
            split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(''' ''' ) ) )
        return split_tokens
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
        '''Token string -> id, falling back to the unk token's id.'''
        return self.encoder.get(_lowerCamelCase ,self.encoder.get(self.unk_token ) )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> str:
        '''Id -> token string, falling back to the unk token.'''
        return self.decoder.get(_lowerCamelCase ,self.unk_token )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
        '''Join tokens and strip the "@@ " BPE continuation markers.'''
        __lowercase = ''' '''.join(_lowerCamelCase ).replace('''@@ ''' ,'''''' ).strip()
        return out_string
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
        '''Copy the vocab and merges files into `save_directory`.'''
        if not os.path.isdir(_lowerCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        __lowercase = os.path.join(
            _lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(
            _lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
            copyfile(self.vocab_file ,_lowerCamelCase )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_lowerCamelCase ):
            copyfile(self.merges_file ,_lowerCamelCase )
        return out_vocab_file, out_merge_file
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Tuple:
        '''Populate `self.encoder` from a fairseq-style "<token> <count>" dict file
        (accepts either a path string or an open file object).'''
        if isinstance(_lowerCamelCase ,_lowerCamelCase ):
            try:
                with open(_lowerCamelCase ,'''r''' ,encoding='''utf-8''' ) as fd:
                    self.add_from_file(_lowerCamelCase )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
            return
        __lowercase = f.readlines()
        for lineTmp in lines:
            __lowercase = lineTmp.strip()
            # The count is everything after the last space; the token precedes it.
            __lowercase = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            __lowercase = line[:idx]
            __lowercase = len(self.encoder )
| 56
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

# All config classes that have an LM-head model, and the corresponding
# `model_type` strings accepted by --model_type. The mangled source read
# MODEL_CONFIG_CLASSES without ever binding it (NameError at import).
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune, or
    to train from scratch.

    NOTE(review): restored from a mangled source where every field was named
    `a` (so only the last survived) and defaults referenced an unbound name;
    field names recovered from the metadata help strings and the attribute
    reads in main() below.
    """

    # Checkpoint to initialize weights from; None = train from scratch.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    # Required when training from scratch without a config.
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): restored from a mangled source where every field was named
    `a` and defaults referenced an unbound name; field names recovered from
    the metadata help strings and the `data_args.<attr>` reads in main().
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    # -1 means "use the model's maximum input length" (resolved in main()).
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by `args`.

    NOTE(review): restored from a mangled source whose inner helper repeated a
    parameter name (a SyntaxError); the name `get_dataset` matches the call
    sites later in this file.
    """

    def _dataset(file_path, ref_path=None):
        # One dataset for a single file, optionally with a whole-word-mask
        # reference file (Chinese WWM).
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Multiple shards given as a glob pattern: concatenate them.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Train and/or evaluate a language model (CLM, MLM, whole-word-mask MLM,
    or permutation LM for XLNet) and return the eval metrics dict.

    NOTE(review): restored from a mangled source that bound every local to one
    throwaway name (leaving `data_args`, `trainer`, etc. undefined) and never
    defined the `main` that the `__main__` guard calls.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # NOTE(review): restored from mangled `fpaa`
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    """Per-process entry point — presumably looked up by name by the TPU
    spawner (xla_spawn); confirm against the launcher before renaming."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton: find all occurrences of several keywords in a
    single pass over the text.

    NOTE(review): restored from a mangled source in which all four methods
    shared one name (later defs shadowed earlier ones) and the constructor
    never bound `self.adlist`; canonical names are used, and the old class
    name is kept as an alias below for backward compatibility.
    """

    def __init__(self, keywords: list[str]) -> None:
        # adlist[i] describes state i: "value" is the incoming edge character,
        # "next_states" the child state ids, "fail_state" the failure link,
        # and "output" the keywords ending at (or reachable via failure links
        # from) this state. State 0 is the root.
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of `current_state` whose edge is `char`, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert `keyword` into the trie, creating states as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute failure links and merge outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk failure links until a state with a matching edge (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match at this state also implies every match of its fail state.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return {keyword: [start indices of its occurrences]} in `string`.

        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result


# Backward-compatible alias for the mangled class name used by this source.
__lowercase = Automaton
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Maps submodule name -> public names it defines; consumed lazily by _LazyModule.
# BUG FIX: the structure dict and the lazy-module registration were rebound to
# a throwaway name, leaving `_import_structure` undefined at import time.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling module is only registered when torch is installed.
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # At runtime replace this module with a proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
| 1
|
'''simple docstring'''
def check_bouncy ( lowerCamelCase_ : int ):
    """Return True if the integer is "bouncy" (Project Euler 112): its digits
    are neither entirely non-decreasing nor entirely non-increasing.

    Raises:
        ValueError: if the argument is not an int.
    """
    # BUG FIX: the guard previously tested isinstance(n, n), which raises
    # TypeError for every call; the intended check is against `int`.
    if not isinstance(lowerCamelCase_, int):
        raise ValueError('''check_bouncy() accepts only integer arguments''')
    str_n = str(lowerCamelCase_)
    sorted_str_n = "".join(sorted(str_n))
    # Bouncy = sorted ascending differs AND sorted descending differs.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


_lowerCAmelCase = check_bouncy  # keep the previous (obfuscated) name bound
def solution ( lowerCamelCase_ : float = 99 ):
    """Return the least number at which the proportion of bouncy numbers among
    1..n first reaches *lowerCamelCase_* percent (Project Euler 112).

    Raises:
        ValueError: if the percentage is outside the open interval (0, 100).
    """
    # BUG FIX: the original body read `percent`, `bouncy_num` and `num`, but
    # the corresponding bindings were lost; restored from the usage below.
    if not 0 < lowerCamelCase_ < 100:
        raise ValueError('''solution() only accepts values from 0 to 100''')
    bouncy_num = 0  # bouncy numbers seen so far
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= lowerCamelCase_:
            return num
        num += 1


_lowerCAmelCase = solution  # keep the previous (obfuscated) name bound
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(9_9)}''')
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Deprecated feature-extractor shim kept for backward compatibility; it
    warns and then defers entirely to the parent GLPN image processor."""

    def __init__(self, *args, **kwargs) -> None:
        # BUG FIX: the warning previously received the *args tuple as its
        # `category` argument; deprecations use FutureWarning.
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56
| 1
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase :
    """RAG tokenizer wrapper: holds a question-encoder tokenizer and a
    generator tokenizer and dispatches calls to whichever is current.

    BUG FIX: duplicate ``_lowerCamelCase`` parameters (a SyntaxError), unbound
    instance attributes and mangled method names are restored from the
    internal call sites (``self.current_tokenizer`` etc.).
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # The input (question) tokenizer is active by default.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers under dedicated subfolders of *save_directory*."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate both sub-tokenizers from a pretrained RAG checkpoint."""
        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Delegate encoding to whichever tokenizer is currently active.
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ):
        """Deprecated helper that tokenizes sources and (optionally) targets
        into a single BatchEncoding with a ``labels`` field."""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''',
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs


# Canonical public alias (the upstream class is called RagTokenizer).
RagTokenizer = __lowercase
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
    """Boruvka's algorithm for the minimum spanning tree of a connected,
    undirected, weighted graph with nodes labelled 0..n-1.

    BUG FIX: duplicate ``_lowerCamelCase`` parameters (SyntaxError), unbound
    attributes/locals, and mangled method names are restored from the
    internal call sites (``self.find_component`` / ``self.set_component`` /
    ``self.union``).
    """

    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes  # number of nodes
        self.m_edges = []  # edges stored as [u, v, weight]
        self.m_component = {}  # node -> representative of its component

    def add_edge(self, u_node, v_node, weight) -> None:
        """Register an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node):
        """Follow parent links until the component representative is found."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """Recompute every node's representative after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the two components, attaching the smaller to the larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Build the MST, printing each added edge and the total weight."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Remember each component's cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each cheapest edge that still joins two distinct components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


# Canonical public alias (the upstream class is called Graph).
Graph = __lowercase
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
    """Tokenization tests for OpenAI GPT (slow and fast/Rust tokenizers),
    driven by the TokenizerTesterMixin harness.

    NOTE(review): several ``__lowercase = ...`` assignments below appear to
    have lost their intended targets (e.g. ``self.vocab_file`` /
    ``self.merges_file`` in the setup method, which later code reads) —
    verify against the upstream test file.
    """

    # Fixtures consumed by the mixin: tokenizer classes and feature flags.
    a : Optional[Any] = OpenAIGPTTokenizer
    a : int = OpenAIGPTTokenizerFast
    a : Tuple = True
    a : Any = False

    def _UpperCAmelCase (self ) -> Dict:
        """Write a minimal BPE vocabulary and merges-file fixture to disk."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowercase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        # NOTE(review): `_lowerCamelCase` is not defined in this scope —
        # presumably this built a token->id dict from the list above; verify.
        __lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
        __lowercase = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file ,'''w''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) )
        with open(self.merges_file ,'''w''' ) as fp:
            fp.write('''\n'''.join(_lowerCamelCase ) )

    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
        """Return an (input, expected output) pair for round-trip tests."""
        return "lower newer", "lower newer"

    def _UpperCAmelCase (self ) -> List[Any]:
        """Full tokenizer check: tokenization and token-to-id conversion."""
        __lowercase = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )

        __lowercase = '''lower'''
        __lowercase = ['''low''', '''er</w>''']
        # NOTE(review): the names `tokenizer` / `tokens` read below are never
        # bound here — presumably lost assignment targets; verify.
        __lowercase = tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )

        __lowercase = tokens + ['''<unk>''']
        __lowercase = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,_lowerCamelCase )

    def _UpperCAmelCase (self ,_lowerCamelCase=15 ) -> str:
        """Padding to max_length must raise for tokenizers without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                __lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )

                # Simple input
                __lowercase = '''This is a simple input'''
                __lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
                __lowercase = ('''This is a simple input''', '''This is a pair''')
                __lowercase = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]

                # Simple input tests
                self.assertRaises(_lowerCamelCase ,tokenizer_r.encode ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )

                # Simple input
                self.assertRaises(_lowerCamelCase ,tokenizer_r.encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )

                # Simple input
                self.assertRaises(
                    _lowerCamelCase ,tokenizer_r.batch_encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' ,)

                # Pair input
                self.assertRaises(_lowerCamelCase ,tokenizer_r.encode ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )

                # Pair input
                self.assertRaises(_lowerCamelCase ,tokenizer_r.encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' )

                # Pair input
                self.assertRaises(
                    _lowerCamelCase ,tokenizer_r.batch_encode_plus ,_lowerCamelCase ,max_length=_lowerCamelCase ,padding='''max_length''' ,)

    def _UpperCAmelCase (self ) -> Optional[int]:
        """Intentionally a no-op: this harness hook is not needed for this model."""
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowercase ( lowerCAmelCase__ ):
    """Placeholder for ftfy/spacy-dependent OpenAI GPT tokenization tests;
    no additional tests are implemented yet (requirements-gated)."""

    pass
| 56
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    # BUG FIX: every binding below had been clobbered to a single throwaway
    # name, leaving `classifier`, `train_datagen`, etc. undefined; also the
    # layer classes are Conv2D / MaxPooling2D (ConvaD / MaxPoolingaD do not
    # exist in Keras).
    classifier = models.Sequential()

    # Step 1 - Convolution
    # 64x64 is the height/width of the dataset images, 3 the RGB channels;
    # (3, 3) is the kernel (filter matrix) size.
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # NOTE(review): fit_generator is deprecated in TF2; Model.fit accepts
    # generators directly — confirm before switching.
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # NOTE(review): a sigmoid output is rarely exactly 0 or 1 — this mapping
    # presumably intended a threshold; confirm intended semantics.
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 56
| 1
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_SCREAMING_SNAKE_CASE = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_SCREAMING_SNAKE_CASE = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __lowercase :
    """Embeds the invisible Stable Diffusion XL watermark into generated images.

    BUG FIX: the constructor's bindings were clobbered, so ``self.watermark``
    and ``self.encoder`` (read below) were never set; restored here.
    """

    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        # The encoder expects the message as a sequence of bits.
        self.encoder.set_watermark('''bits''', self.watermark)

    def apply_watermark(self, images):
        """Watermark a batch of images in [-1, 1], layout (N, C, H, W).

        Returns the batch unchanged when the images are too small for the
        DWT-DCT watermark."""
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        # Convert to 0..255 float RGB in HWC order for the encoder.
        pixel_images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        encoded = [self.encoder.encode(image, '''dwtDct''') for image in pixel_images]
        images = torch.from_numpy(np.array(encoded)).permute(0, 3, 1, 2)
        # Back to the [-1, 1] range expected downstream.
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images

    # Backward-compatible alias for the previous (obfuscated) method name.
    _UpperCAmelCase = apply_watermark


# Canonical public alias (upstream: StableDiffusionXLWatermarker).
StableDiffusionXLWatermarker = __lowercase
| 56
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
# Public API of this utils package; star-imports and doc tooling follow it.
# BUG FIX: this list was rebound to a throwaway name, so it no longer acted
# as `__all__`.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Maps submodule name -> public names it defines; consumed lazily by _LazyModule.
# BUG FIX: the structure dict and the lazy-module registration were rebound to
# a throwaway name, leaving `_import_structure` undefined at import time.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # At runtime replace this module with a proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Local filenames used when saving/loading the tokenizer.
# BUG FIX: these constants were rebound to a single throwaway name, leaving
# VOCAB_FILES_NAMES etc. (referenced by the tokenizer class below) undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Hub locations of the pretrained tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

# Maximum sequence lengths supported by the pretrained positional embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class __lowercase ( lowerCAmelCase__ ):
    """Fast (Rust-backed) tokenizer for GPT-NeoX-20B based on byte-level BPE.

    BUG FIX: the ``__init__`` signature had duplicate ``_lowerCamelCase``
    parameters (a SyntaxError) and the body read ``add_prefix_space`` and
    several ``self.``/local names that were never bound; the signature and
    bindings are restored from the body's own usage.
    """

    # Harness attributes consumed by PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Make the backend pre-tokenizer honour add_prefix_space if it differs.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the backend tokenizer's model files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        """Flatten a Conversation into ids, truncating to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 56
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """Configuration class for BiT (Big Transfer) models.

    BUG FIX: the ``__init__`` signature had duplicate ``_lowerCamelCase``
    parameters (a SyntaxError), and the class attributes read by the body
    (``self.layer_types`` / ``self.supported_padding``) were all named ``a``;
    names are restored from the body's own usage.
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # mutable defaults kept read-only by convention
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            # Normalise the padding strategy to upper case, rejecting unknowns.
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # One named stage per depth entry, plus the stem.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 56
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Citation, description and usage documentation consumed by the GLUE metric
# class below (via add_start_docstrings and MetricInfo).
# BUG FIX: all three constants were rebound to one throwaway name, leaving
# _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION undefined.
_CITATION = '''\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
'''

_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''

_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:
    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}
    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}
    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
'''
def simple_accuracy ( preds , labels ):
    """Fraction of positions where *preds* equals *labels* (numpy arrays).

    BUG FIX: both parameters were named ``lowerCamelCase_`` (a SyntaxError);
    names restored per the call site in the Metric class (``simple_accuracy``).
    """
    # Element-wise comparison yields a boolean array; its mean is the accuracy.
    return float((preds == labels).mean())


_lowerCAmelCase = simple_accuracy  # keep the previous (obfuscated) name bound
def acc_and_fa ( preds , labels ):
    """Return accuracy and F1 for binary predictions (GLUE MRPC/QQP).

    BUG FIX: both parameters were named ``lowerCamelCase_`` (a SyntaxError);
    names restored per the call site in the Metric class (``acc_and_fa``).
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


_lowerCAmelCase = acc_and_fa  # keep the previous (obfuscated) name bound
def pearson_and_spearman ( preds , labels ):
    """Return Pearson and Spearman correlations of two sequences (GLUE STS-B).

    BUG FIX: both parameters were named ``lowerCamelCase_`` (a SyntaxError);
    names restored per the call site in the Metric class.
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


_lowerCAmelCase = pearson_and_spearman  # keep the previous (obfuscated) name bound
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowercase ( datasets.Metric ):
    """GLUE benchmark metric; the subset is selected via ``config_name``.

    BUG FIX: the two hook methods were both named ``_UpperCAmelCase`` (so they
    never overrode ``datasets.Metric._info`` / ``_compute``), and ``_compute``
    had duplicate ``_lowerCamelCase`` parameters (a SyntaxError); names
    restored per the ``datasets.Metric`` interface.
    """

    def _info(self):
        """Describe the metric's inputs/outputs as a ``datasets.MetricInfo``."""
        # Only genuine GLUE task names are accepted.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # STS-B is a regression task; everything else is classification.
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Compute the metric(s) for the configured GLUE subset."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 56
| 1
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
    """SentencePiece-style Unigram tokenizer built on HuggingFace `tokenizers`:
    NMT + NFKC normalisation, whitespace collapsing and lowercasing, then
    metaspace/digit/punctuation pre-tokenisation.

    BUG FIX: duplicate ``_lowerCamelCase`` parameters (a SyntaxError), unbound
    locals, and mangled method names are restored; the internal calls to
    ``self.add_unk_id()`` fix the method names.
    """

    def __init__(
        self,
        replacement="▁",
        add_prefix_space=True,
        unk_token="<unk>",
        eos_token="</s>",
        pad_token="<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Dense list of special tokens ordered by their ids.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Append EOS to every encoded sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files, vocab_size=8000, show_progress=True):
        """Train from text files; *files* may be a single path or a list."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator, vocab_size=8000, show_progress=True):
        """Train from an iterator of strings."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the <unk> id is registered."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 56
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Drops obsolete keys, renames layer-norm/projection keys to the HF naming,
    and splits the fused ``qkv_proj`` weights into separate Q, K, V tensors.

    Args:
        checkpoint_path: path to the ``torch.save``-d checkpoint file.

    Returns:
        dict: a state dict loadable into ``OPTModel``.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        # metaseq checkpoints nest the weights under a "model" key.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint to a HF ``OPTModel`` and save it.

    Args:
        checkpoint_path: path to the original checkpoint file.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an ``OPTConfig`` to use; defaults to
            a freshly constructed ``OPTConfig()``.
    """
    # NOTE(review): restored the lost assignment targets and the function name
    # that the __main__ block actually calls.
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    # half(): the original checkpoints are fp16.
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # NOTE(review): the obfuscated original assigned the parser and the parsed
    # args to `_SCREAMING_SNAKE_CASE` but then referenced `parser`/`args`;
    # restored the names actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 1
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Training via the CLI requires at least one deep-learning backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
# NOTE(review): both assignments rebind the same name, so the first one is
# dead — presumably these were two distinct flags (e.g. USE_XLA / USE_AMP)
# before obfuscation; verify against the original file.
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def train_command_factory(args: Namespace):
    """Factory for `transformers-cli train`: build the command from parsed args.

    Args:
        args: argparse namespace produced by the `train` subparser.

    Returns:
        An instance of the training command class defined below.
    """
    # NOTE(review): the original returned undefined `TrainCommand`; the command
    # class in this file is (obfuscated-)named `__lowercase`.
    return __lowercase(args)
class __lowercase(BaseTransformersCLICommand):
    """`transformers-cli train`: train a model on a (text-classification) task."""

    @staticmethod
    def register_subcommand(parser):
        """Register the `train` subcommand and its arguments.

        Args:
            parser: an argparse subparsers action (result of `add_subparsers()`).
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        # Dispatch to the module-level factory when this subcommand is chosen.
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args):
        """Capture CLI args, build the pipeline and load the dataset(s)."""
        self.logger = logging.get_logger("transformers-cli/training")
        # Prefer TF when installed, otherwise fall back to PyTorch.
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        """PyTorch training is not implemented for this command."""
        raise NotImplementedError

    def run_tf(self):
        """Fit the TF pipeline on the loaded dataset(s) and save it."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 56
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase(unittest.TestCase):
    """Fast CPU tests for VQDiffusionPipeline using tiny dummy components.

    NOTE(review): every method of the obfuscated original shared the same
    name, so later definitions shadowed earlier ones and the class was
    non-functional; names restored from how the bodies reference them
    (self.num_embed, self.dummy_vqvae, ...).
    """

    def tearDown(self):
        # Free memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # Size of the VQ codebook shared by the dummy models.
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        # Run again with return_dict=False and check both outputs agree.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        # Tolerances kept exactly as in the original assertions.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase(unittest.TestCase):
    """Slow GPU integration test against the released ithq checkpoint."""

    def tearDown(self):
        # Free memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 56
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase(YolosImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a FutureWarning on construction and otherwise defers entirely to
    YolosImageProcessor (imported at the top of this file).
    """

    def __init__(self, *args, **kwargs) -> None:
        # NOTE(review): the original passed its *args tuple as the warning
        # category (a runtime TypeError); FutureWarning is the intended
        # category for deprecation notices in this codebase.
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    NOTE(review): the obfuscated original had duplicate parameter names (a
    SyntaxError), lost every `tokenizer.<component> =` assignment target and
    shadowed all three method names; restored from how the bodies reference
    `replacement`, `add_prefix_space` and `self.add_unk_id()`.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Special tokens as a list indexed by their fixed ids above.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                # Collapse runs of spaces into a single space.
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Always append the EOS token to a single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True) -> None:
        """Train the Unigram model on one or more plain-text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ) -> None:
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self) -> None:
        """Write the configured unk id into the serialized Unigram model."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 56
| 1
|
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Import structure consumed by _LazyModule. NOTE(review): the obfuscated
# original assigned every sub-list to the same overwritten name and then
# referenced an undefined `_import_structure`; restored the standard pattern.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
|
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Import structure consumed by _LazyModule. NOTE(review): the obfuscated
# original assigned every sub-list to the same overwritten name and then
# referenced an undefined `_import_structure`; restored the standard pattern.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase(FlavaImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a FutureWarning on construction and otherwise defers entirely to
    FlavaImageProcessor (imported at the top of this file).
    """

    def __init__(self, *args, **kwargs) -> None:
        # NOTE(review): the original passed its *args tuple as the warning
        # category (a runtime TypeError); FutureWarning is the intended
        # category for deprecation notices in this codebase.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used to post CI results. Bind the name `client` as well, since
# the Message class below calls `client.chat_postMessage(...)` directly.
_SCREAMING_SNAKE_CASE = client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse a pytest summary line into counts and duration.

    Args:
        test_results: summary string such as ``"= 2 failed, 3 passed in 0.5s ="``.

    Returns:
        tuple: ``(failed, success, time_spent)`` where the counts are ints and
        ``time_spent`` is the raw duration token from the summary.
    """
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        # Counts immediately precede the "failed"/"passed" words.
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error output.

    Args:
        failures_short_lines: the pytest "failures short" report text.

    Returns:
        dict: ``{file: first_error_line}``.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            # Header line of a doctest failure; the file name is the 3rd token.
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # First non-numbered line after the header is the error summary.
            # NOTE(review): the obfuscated original dropped the
            # `failures[file] =` target, making the function return {} always.
            failures[file] = line
            in_error = False
    return failures
class __lowercase:
    """Builds and posts the Slack report for the doc-test CI run.

    Relies on the module-level `client` (Slack WebClient) and, in
    `category_failures`, on the module-level `doc_test_results` dict assembled
    in the __main__ block (mirroring the original behavior).

    NOTE(review): the obfuscated original lost every `self.<attr> =` target and
    all method names; restored from how the properties/methods reference them.
    """

    def __init__(self, title, doc_test_results):
        self.title = title
        # Keep only the duration token, e.g. "4h12m5s" from "4h12m5s,".
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        """Total run time rendered as ``"XhYmZs"``."""
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # Reads the module-level `doc_test_results` (not self.doc_test_results).
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        """The full Slack "blocks" payload, JSON-encoded."""
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic "tests could not run" message to Slack."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the main report message and remember its thread handle."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        # Keep the response so post_reply() can thread replies under it.
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure reply."""
        failures_text = ""
        for key, value in failures.items():
            # Truncate very long error messages for Slack.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing job, under the main message."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                # Stay under Slack's rate limits.
                time.sleep(1)
def get_job_links():
    """Map GitHub Actions job names to their html URLs for the current run.

    Reads GITHUB_RUN_ID from the environment and pages through the jobs API.

    Returns:
        dict: ``{job_name: html_url}``; empty on any error.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # One page of 100 was already fetched; page through the remainder.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def _lowerCAmelCase(lowerCamelCase_: str):
    """Read every file inside the artifact directory into ``{file stem: contents}``.

    Args:
        lowerCamelCase_: path of the artifact directory (may not exist).

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    _artifact = {}
    if os.path.exists(lowerCamelCase_):
        files = os.listdir(lowerCamelCase_)
        for file in files:
            try:
                with open(os.path.join(lowerCamelCase_, file), encoding="utf-8") as f:
                    # Key by the filename without its extension.
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(lowerCamelCase_, file)}.") from e
    return _artifact


# Public alias — the __main__ block calls retrieve_artifact().
retrieve_artifact = _lowerCAmelCase
def _lowerCAmelCase():
    """Scan the current working directory and return ``{artifact name: Artifact}``.

    Every sub-directory of the CWD is treated as one downloaded artifact.
    """

    class Artifact:
        """A named artifact plus the path(s) where it was found."""

        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts


# Public alias — the __main__ block calls retrieve_available_artifacts().
retrieve_available_artifacts = _lowerCAmelCase
if __name__ == "__main__":
    # Map of job name -> job URL for the current GitHub Actions run.
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    # Doc-test categories, keyed by the glob matching their test files.
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failed"] = failed
        doc_test_results["success"] = success
        # Strip surrounding brackets from the pytest timing string.
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                # pytest ids look like "path/to/file.py::test_name".
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase(lowerCamelCase_: str):
    """Return True iff every character in the input string occurs only once.

    Uses an arbitrary-precision int as a bitmap: bit ``ord(ch)`` is set the
    first time character ``ch`` is seen; a set bit on a later sighting means
    a repeated character.
    """
    bitmap = 0  # one bit per code point seen so far
    for ch in lowerCamelCase_:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 56
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase():
    """Entry point for the ``diffusers-cli`` tool.

    Parses the requested sub-command; prints help and exits with status 1
    when none was given, otherwise instantiates the command's service object
    and runs it.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No sub-command given: show usage and exit non-zero.
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()


# Public alias — the __main__ guard below calls main().
main = _lowerCAmelCase
if __name__ == "__main__":
    # Dispatch to the CLI entry point.
    # NOTE(review): `main` must be bound at module level; as written above the
    # entry function is named `_lowerCAmelCase` — confirm an alias exists.
    main()
| 56
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''camembert-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = '''▁'''
class __lowercase(lowerCAmelCase__):
    """
    Fast CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library),
    using RoBERTa-style special-token layouts:
    single sequence ``<s> X </s>``, pair ``<s> A </s></s> B </s>``.
    """

    # Restore distinct class attributes: the original assigned them all to the
    # same name `a`, so only the last one survived.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        """Build the fast tokenizer; `vocab_file` is the sentencepiece model."""
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created when the sentencepiece model
        # file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """CamemBERT does not use token type ids: always return zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model into *save_directory*; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 56
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
_SCREAMING_SNAKE_CASE = '''▁'''
class __lowercase(lowerCAmelCase__):
    """
    Slow (SentencePiece-based) BARThez tokenizer with fairseq-style special
    token ids reserved at the front of the vocabulary.
    """

    # Restore distinct class attributes: the original assigned them all to `a`.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the sentencepiece model from *vocab_file*."""
        # Mask token behaves like a normal word (keeps the preceding space).
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # fairseq reserves the first ids for its control tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # NOTE(review): name reconstructed from the upstream tokenizer — confirm.
        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """BARThez does not use token type ids: always return zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """Size of the underlying sentencepiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return {token: id} including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its id, honouring the reserved fairseq ids first."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # sentencepiece returns 0 for unknown pieces.
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Map an id back to its token, honouring the reserved fairseq ids."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Detokenize, decoding special tokens verbatim (not through spm)."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on set.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Write the sentencepiece model into *save_directory*; return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Fall back to serializing the in-memory model.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase(arr: Sequence[float], low: int, high: int):
    """Divide-and-conquer maximum subarray of ``arr[low:high+1]``.

    Returns:
        ``(start, end, sum)`` of the maximum subarray; ``(None, None, 0)``
        for an empty input.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    # The answer is the best of: entirely left, entirely right, or crossing mid.
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


# Public alias used for the recursive calls above.
max_subarray = _lowerCAmelCase
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase(lowerCamelCase_: int):
    """Time ``max_subarray`` on a random array of the given size.

    Returns the elapsed wall-clock time in seconds.
    """
    input_size = lowerCamelCase_
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


# Public alias — the benchmark driver calls time_max_subarray().
time_max_subarray = _lowerCAmelCase
def _lowerCAmelCase():
    """Benchmark ``max_subarray`` over growing input sizes and plot the timings."""
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase(separator: str, separated: list[str]):
    """Join *separated* with *separator*, stripping leading/trailing separators.

    Raises:
        Exception: if any item is not a string.
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # strip() removes separator *characters* from both ends.
    return joined.strip(separator)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the CLIPSeg sub-package: maps submodule name to the
# public names it provides.  The dict must be named `_import_structure` because
# it is handed to _LazyModule below.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: modeling classes are simply not exported.
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    # Static type checkers get real imports.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
from math import sqrt
def _lowerCAmelCase(number: int):
    """Trial-division primality test for a non-negative integer."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


# Public alias — the other functions in this module call is_prime().
is_prime = _lowerCAmelCase
def _lowerCAmelCase(n):
    """Sieve of Eratosthenes: all primes from 2 up to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


# Descriptive public alias.
sieve_er = _lowerCAmelCase
def _lowerCAmelCase(n):
    """Return all primes from 2 to n (inclusive), via per-number primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


# Public alias — goldbach() calls get_prime_numbers().
get_prime_numbers = _lowerCAmelCase
def _lowerCAmelCase(number):
    """Return the prime factorization of *number* as a list of factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE: true division, as in the original — quotient becomes float.
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


# Public alias — greatest/smallest_prime_factor and kg_v call it by this name.
prime_factorization = _lowerCAmelCase
def _lowerCAmelCase(number):
    """Return the largest prime factor of *number*."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


# Descriptive public alias.
greatest_prime_factor = _lowerCAmelCase
def _lowerCAmelCase(number):
    """Return the smallest prime factor of *number*."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


# Descriptive public alias.
smallest_prime_factor = _lowerCAmelCase
def _lowerCAmelCase(number):
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


# Public alias — goldbach() calls is_even().
is_even = _lowerCAmelCase
def _lowerCAmelCase(number):
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


# Descriptive public alias.
is_odd = _lowerCAmelCase
def _lowerCAmelCase(number: int):
    """Goldbach: return two primes whose sum equals the even *number* > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


# Descriptive public alias.
goldbach = _lowerCAmelCase
def _lowerCAmelCase(number1, number2):
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


# Public alias — simplify_fraction() calls gcd().
gcd = _lowerCAmelCase
def _lowerCAmelCase(number1, number2):
    """Least common multiple (kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1: shared factors contribute max multiplicity
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2: factors unique to number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


# Descriptive public alias.
kg_v = _lowerCAmelCase
def _lowerCAmelCase(n):
    """Return the n-th prime (0-indexed: n=0 gives 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


# Descriptive public alias.
get_prime = _lowerCAmelCase
def _lowerCAmelCase(p_number_1, p_number_2):
    """Return all primes strictly between the two given primes (exclusive)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


# Descriptive public alias.
get_primes_between = _lowerCAmelCase
def _lowerCAmelCase(n):
    """Return all positive divisors of n (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


# Public alias — is_perfect_number() calls get_divisors().
get_divisors = _lowerCAmelCase
def _lowerCAmelCase(number):
    """Return True iff *number* equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


# Descriptive public alias.
is_perfect_number = _lowerCAmelCase
def _lowerCAmelCase(numerator, denominator):
    """Reduce a fraction to lowest terms; returns (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


# Descriptive public alias.
simplify_fraction = _lowerCAmelCase
def _lowerCAmelCase(n):
    """Return n! for n >= 0 (iteratively)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


# Descriptive public alias.
factorial = _lowerCAmelCase
def _lowerCAmelCase(n):
    """Return the n-th Fibonacci number of this sequence: fib(0)=1, fib(1)=1, fib(2)=2, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans


# Descriptive public alias.
fib = _lowerCAmelCase
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
    """Tests for Wav2Vec2ProcessorWithLM (CTC tokenizer + feature extractor + pyctcdecode beam-search decoder).

    NOTE(review): throughout this class the obfuscation collapsed distinct local
    bindings into repeated ``__lowercase = ...`` assignments while later
    statements still read the original names (``tokenizer``, ``processor``,
    ``decoder``, ``logits`` ...), and every test method shares the name
    ``_UpperCAmelCase`` (so earlier defs are shadowed). The methods as written
    raise NameError at runtime; the original bindings/names must be restored.
    """
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """Create a temp dir holding a CTC vocab file and a feature-extractor config used by the tests."""
        __lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        __lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
        __lowercase = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        __lowercase = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        __lowercase = tempfile.mkdtemp()
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        # load decoder from hub
        __lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
        """Tokenizer factory: Wav2Vec2 CTC tokenizer from the temp dir, merging extra kwargs over the defaults."""
        __lowercase = self.add_kwargs_tokens_map.copy()
        kwargs.update(_lowerCamelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
        """Feature-extractor factory from the temp dir."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
        """Decoder factory: load the n-gram beam-search decoder from the hub."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """Tear down: remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """Round-trip save_pretrained/from_pretrained must preserve tokenizer, feature extractor and decoder."""
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
        self.assertIsInstance(processor.decoder ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Any:
        """from_pretrained must forward extra decoder kwargs (alpha/beta/score_boundary/unk_score_offset) to the LM."""
        __lowercase = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha ,5.0 )
        self.assertEqual(processor.language_model.beta ,3.0 )
        self.assertEqual(processor.language_model.score_boundary ,-7.0 )
        self.assertEqual(processor.language_model.unk_score_offset ,3 )
    def _UpperCAmelCase (self ) -> Dict:
        """Constructing the processor with tokens the decoder alphabet does not include must raise."""
        __lowercase = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
    def _UpperCAmelCase (self ) -> Optional[int]:
        """Calling the processor on audio must match calling the feature extractor directly."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = floats_list((3, 1000) )
        __lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
        __lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Calling the processor on text must match calling the tokenizer directly."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = '''This is a test string'''
        __lowercase = processor(text=_lowerCamelCase )
        __lowercase = tokenizer(_lowerCamelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
        """Deterministic dummy logits helper (np.random seeded); shape defaults to (2, 10, 16)."""
        np.random.seed(_lowerCamelCase )
        return np.random.rand(*_lowerCamelCase )
    def _UpperCAmelCase (self ) -> str:
        """processor.decode must agree with pyctcdecode's decode_beams on text, logit score and LM score."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
        __lowercase = processor.decode(_lowerCamelCase )
        __lowercase = decoder.decode_beams(_lowerCamelCase )[0]
        self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
        """batch_decode (with/without a multiprocessing pool) must agree with decode_beams_batch."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            __lowercase = processor.batch_decode(_lowerCamelCase )
        else:
            with get_context(_lowerCamelCase ).Pool() as pool:
                __lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
        __lowercase = list(_lowerCamelCase )
        with get_context('''fork''' ).Pool() as p:
            __lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
        __lowercase , __lowercase , __lowercase = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
    def _UpperCAmelCase (self ) -> Any:
        """Beam-search kwargs (beam_width, beam_prune_logp, token_min_logp) must be forwarded to pyctcdecode."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 15
        __lowercase = -2_0.0
        __lowercase = -4.0
        __lowercase = processor.batch_decode(
            _lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
        __lowercase = decoded_processor_out.text
        __lowercase = list(_lowerCamelCase )
        with get_context('''fork''' ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
                _lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        __lowercase = [d[0][2] for d in decoded_decoder_out]
        __lowercase = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
        self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
        self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """LM kwargs (alpha, beta, unk_score_offset, lm_score_boundary) must reach the decoder's language model."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 2.0
        __lowercase = 5.0
        __lowercase = -2_0.0
        __lowercase = True
        __lowercase = processor.batch_decode(
            _lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
        __lowercase = decoded_processor_out.text
        __lowercase = list(_lowerCamelCase )
        decoder.reset_params(
            alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
        with get_context('''fork''' ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
                _lowerCamelCase ,_lowerCamelCase ,)
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha ,2.0 )
        self.assertEqual(lm_model.beta ,5.0 )
        self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
        self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Optional[int]:
        """from_pretrained must download only decoder-relevant files (alphabet + LM), nothing else."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        __lowercase = os.listdir(_lowerCamelCase )
        __lowercase = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Loading from a local snapshot must yield the same decoder files as loading from the hub."""
        __lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        __lowercase = os.listdir(_lowerCamelCase )
        __lowercase = os.listdir(_lowerCamelCase )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Dict:
        """AutoProcessor must resolve to an equivalent processor (same features, same decoded text)."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = floats_list((3, 1000) )
        __lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
        __lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
        __lowercase = processor_auto.batch_decode(_lowerCamelCase )
        self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
    def _UpperCAmelCase (self ) -> Dict:
        """processor.model_input_names must mirror the feature extractor's."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        self.assertListEqual(
            processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
        """Helper: collect `key` from every offset dict in `offsets`."""
        __lowercase = [d[key] for d in offsets]
        return retrieved_list
    def _UpperCAmelCase (self ) -> Optional[int]:
        """decode(output_word_offsets=True) must return word offsets consistent with the decoded text."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = self._get_dummy_logits()[0]
        __lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """batch_decode(output_word_offsets=True) must return per-sample word offsets consistent with the text."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
    @slow
    @require_torch
    @require_torchaudio
    def _UpperCAmelCase (self ) -> List[Any]:
        """Integration test: word time stamps from a real model on a common_voice sample match reference values."""
        import torch
        __lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
        __lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
        __lowercase = iter(_lowerCamelCase )
        __lowercase = next(_lowerCamelCase )
        __lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        __lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        __lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
        with torch.no_grad():
            __lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
        __lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
        # offset index -> seconds conversion factor
        __lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        __lowercase = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        __lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
        self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
        # output times
        __lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
        __lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
        # fmt: off
        __lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        __lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
        self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode lowercase text with the A1Z26 cipher: 'a' -> 1, ..., 'z' -> 26.

    Args:
        plain: lowercase input string (the body references `plain`, which the
            obfuscated version left unbound; restored here, and the name
            `encode` matches the in-file caller).

    Returns:
        One integer per character, computed as ord(char) - 96.
    """
    # ord('a') == 97, so subtracting 96 maps 'a'..'z' onto 1..26.
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    """Decode an A1Z26 sequence back to lowercase text: 1 -> 'a', ..., 26 -> 'z'.

    Args:
        encoded: integers in 1..26 (the body references `encoded`, which the
            obfuscated version left unbound; restored here, and the name
            `decode` matches the in-file caller).

    Returns:
        The decoded string.
    """
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    """Interactive demo: read a line, print its A1Z26 encoding and round-trip decoding.

    Restored name `main` matches the call in the `__main__` guard; the
    intermediate result is bound to `encoded`, which the decoded print reads.
    """
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
# Script entry point: run the interactive A1Z26 demo only when executed directly.
if __name__ == "__main__":
    main()
| 56
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Image processor that resizes images down to the nearest multiple of
    ``size_divisor`` and optionally rescales pixel values by 1/255.

    NOTE(review): parameter/attribute names were reconstructed from the
    attribute reads in ``preprocess`` (``self.do_resize`` etc.) and the
    in-class calls ``self.resize``/``self.rescale``; the obfuscated original
    duplicated every parameter name (a SyntaxError) and bound locals instead
    of instance attributes. Keyword order follows the GLPN image processor —
    confirm against upstream.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize = True, size_divisor = 32, resample=PILImageResampling.BILINEAR, do_rescale = True, **kwargs) -> None:
        """
        Args:
            do_resize: round (height, width) down to a multiple of `size_divisor`.
            size_divisor: divisor used when resizing.
            resample: PIL resampling filter passed to `resize`.
            do_rescale: scale pixel values by 1/255 during `preprocess`.
        """
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format = None, **kwargs) -> np.ndarray:
        """Resize `image` so height and width become the largest multiples of `size_divisor`."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize = None, size_divisor = None, resample=None, do_rescale = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Prepare one image or a list of images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 56
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
    """Configuration for Transformer-XL (``transfo-xl``) models.

    Stores vocabulary/architecture hyper-parameters: adaptive-softmax cutoffs,
    model/embedding dimensions, attention settings and initialization ranges.

    NOTE(review): parameter names and the property name were reconstructed —
    the obfuscated original duplicated every ``__init__`` parameter name
    (a SyntaxError) while the body read the canonical TransfoXLConfig names
    (``d_model``, ``proj_share_all_but_first`` ...), and the
    ``@max_position_embeddings.setter`` decorator referenced a property that
    had been renamed away.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # noqa: B006 — copied, never mutated
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ) -> None:
        """Build the config; unknown kwargs are forwarded to the base config."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # First projection is never tied; the rest follow proj_share_all_but_first.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self) -> int:
        """Transformer-XL has no fixed sequence-length limit; -1 signals 'unbounded'."""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value) -> None:
        """Reject attempts to set a length limit — the architecture has none."""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 are free path whereas 1's are obstacles
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Positions are (y, x) tuples throughout this module.
TPosition = tuple[int, int]
class Node:
    """A* search-tree node: grid position, goal, path cost and heuristic.

    Restored name ``Node`` matches the constructor calls later in this file;
    parameter names were reconstructed from the attribute reads in the body.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x), matching the module-wide convention.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance to the goal: Manhattan when HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Total-cost ordering lets the search sort its open list.
        return self.f_cost < other.f_cost
class AStar:
    """Forward A* search over the module-level ``grid`` using ``delta`` moves.

    Restored names (``AStar``, ``search``, ``get_successors``,
    ``retrace_path``) match the call sites elsewhere in this file; local
    bindings were reconstructed from the names the body reads.
    """

    def __init__(self, start, goal) -> None:
        # Start node has zero path cost; the target's g_cost is a sentinel
        # (99999) since it is only used for position comparison.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path start→target, or [start] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent) -> list[Node]:
        """In-bounds, non-obstacle neighbours of `parent` (unit step cost)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors

    def retrace_path(self, node) -> list[TPosition]:
        """Follow parent links from `node` back to the start; return the path in forward order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar searches (start→goal and goal→start) that
    repeatedly retarget each other's frontier node and meet in the middle.

    Restored names match the call sites in this file (constructor, and the
    in-class call to ``retrace_bidirectional_path``).
    """

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Advance both searches in lockstep until their frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each direction chases the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> list[TPosition]:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        # Drop the duplicated meeting node before reversing.
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): unlike the AStar timing above, no search() call is visible
    # here in the original, so only construction time is measured — confirm intent.
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 56
| 1
|
'''simple docstring'''
def power(base: int, exponent: int) -> int:
    """Return base**exponent for a non-negative integer exponent, by recursion.

    Args:
        base: the number to raise.
        exponent: non-negative power (negative values recurse without bound —
            the caller below compensates by inverting the result).

    Returns:
        base multiplied by itself `exponent` times; 1 when exponent == 0.
    """
    # Restored name/params: the original duplicated both parameter names and
    # the recursive call plus the __main__ caller both use `power`.
    return base * power(base, exponent - 1) if exponent else 1
# Interactive demo: read base and exponent, print base**exponent.
# NOTE(review): the obfuscation reused the single name _SCREAMING_SNAKE_CASE for
# every assignment below, so the later reads of `base`, `exponent` and `result`
# raise NameError — the original distinct names must be restored to run.
if __name__ == "__main__":
    print('''Raise base to the power of exponent using recursion...''')
    _SCREAMING_SNAKE_CASE = int(input('''Enter the base: ''').strip())
    _SCREAMING_SNAKE_CASE = int(input('''Enter the exponent: ''').strip())
    _SCREAMING_SNAKE_CASE = power(base, abs(exponent))
    if exponent < 0: # power() does not properly deal w/ negative exponents
        _SCREAMING_SNAKE_CASE = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
| 56
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
    """Build a UniSpeechSatForSequenceClassification model and copy the s3prl
    downstream head weights (projector + post-net linear) into it.

    NOTE(review): obfuscation broke this function — the parameter list repeats
    one name (a SyntaxError), the weight copies were collapsed into
    ``__lowercase = ...`` instead of writing model attributes, and the returned
    name ``model`` is never bound. The original attribute targets must be
    restored before use.
    """
    __lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
    __lowercase = downstream_dict['''projector.weight''']
    __lowercase = downstream_dict['''projector.bias''']
    __lowercase = downstream_dict['''model.post_net.linear.weight''']
    __lowercase = downstream_dict['''model.post_net.linear.bias''']
    return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
    """Build a UniSpeechSatForAudioFrameClassification model and copy the s3prl
    diarization head (a single linear layer) into it.

    NOTE(review): same obfuscation breakage as the function above — duplicated
    parameter names and ``__lowercase`` assignments where model attributes
    should be written; ``model`` is never bound.
    """
    __lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
    __lowercase = downstream_dict['''model.linear.weight''']
    __lowercase = downstream_dict['''model.linear.bias''']
    return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
    """Build a UniSpeechSatForXVector model and copy the s3prl x-vector head
    (connector, per-layer TDNN kernels, two utterance-level linears, objective
    weight) into it.

    NOTE(review): same obfuscation breakage — duplicated parameter names,
    ``__lowercase`` in place of model-attribute writes, and reads of the
    original locals ``downstream_dict``/``hf_config``/``model`` that are never
    bound under these names.
    """
    __lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
    __lowercase = downstream_dict['''connector.weight''']
    __lowercase = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        __lowercase = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        __lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    __lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    __lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    __lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    __lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    __lowercase = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
    """Convert an s3prl checkpoint into a Hugging Face model + feature extractor,
    dispatching on the architecture named in the config, and save both.

    NOTE(review): obfuscation breakage — duplicated parameter names (SyntaxError)
    and ``__lowercase`` assignments shadowing the locals (``checkpoint``,
    ``hf_config``, ``arch``, ``hf_model``, ``hf_feature_extractor``) that later
    lines read; the dispatched helpers (``convert_classification`` etc.) were
    also renamed away by the obfuscation. Original names must be restored.
    """
    __lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
    __lowercase = checkpoint['''Downstream''']
    __lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
    __lowercase = WavaVecaFeatureExtractor.from_pretrained(
        lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
    __lowercase = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        __lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        __lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    elif arch.endswith('''ForXVector''' ):
        __lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        __lowercase = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(lowerCamelCase_ )
    hf_model.save_pretrained(lowerCamelCase_ )
# CLI entry point: parse checkpoint/config/output paths and run the conversion.
# NOTE(review): the parser/args bindings were obfuscated to _SCREAMING_SNAKE_CASE,
# so the reads of `parser`, `args` and `convert_saprl_checkpoint` raise NameError
# as written — the original names must be restored to run.
if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
    """Fast, CPU-only tests for TextToVideoSDPipeline built from tiny components.

    NOTE(review): this block is machine-obfuscated.  Every class attribute is
    bound to the same name ``a`` (each rebinding shadows the previous one),
    every test method is named ``_UpperCAmelCase`` (so only the last
    definition survives on the class), the mixin base ``lowerCAmelCase__`` is
    undefined here, and several call sites read the undefined name
    ``_lowerCamelCase`` where literals or locals originally stood.  The
    original identifiers must be restored before these tests can run.
    """
    # pipeline_class (originally)
    a : Optional[Any] = TextToVideoSDPipeline
    # params (originally)
    a : Optional[int] = TEXT_TO_IMAGE_PARAMS
    # batch_params (originally)
    a : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    a : str = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """Build the dummy components dict (tiny UNet3D, DDIM, VAE, CLIP)."""
        torch.manual_seed(0 )
        __lowercase = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=32 ,attention_head_dim=4 ,)
        # NOTE(review): clip_sample / set_alpha_to_one receive the undefined
        # name ``_lowerCamelCase`` here — originally boolean literals.
        __lowercase = DDIMScheduler(
            beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=_lowerCamelCase ,set_alpha_to_one=_lowerCamelCase ,)
        torch.manual_seed(0 )
        __lowercase = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=128 ,)
        torch.manual_seed(0 )
        __lowercase = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='''gelu''' ,projection_dim=512 ,)
        __lowercase = CLIPTextModel(_lowerCamelCase )
        __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # The dict below reads unet/scheduler/vae/... — the locals the
        # assignments above were originally bound to.
        __lowercase = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=0 ) -> str:
        """Build deterministic pipeline-call kwargs (prompt, seeded generator).

        NOTE(review): duplicate parameter names make this def a SyntaxError as
        written — originally (self, device, seed=0).
        """
        if str(_lowerCamelCase ).startswith('''mps''' ):
            __lowercase = torch.manual_seed(_lowerCamelCase )
        else:
            __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
        __lowercase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def _UpperCAmelCase (self ) -> Optional[int]:
        """Smoke-test a 2-step text-to-video run and compare a frame slice."""
        __lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = TextToVideoSDPipeline(**_lowerCamelCase )
        __lowercase = sd_pipe.to(_lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = self.get_dummy_inputs(_lowerCamelCase )
        __lowercase = '''np'''
        __lowercase = sd_pipe(**_lowerCamelCase ).frames
        __lowercase = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        __lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def _UpperCAmelCase (self ) -> List[str]:
        """Attention slicing should match the default forward pass within 3e-3."""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowerCamelCase ,expected_max_diff=3E-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def _UpperCAmelCase (self ) -> Any:
        """xformers attention should match the default forward pass within 1e-2."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowerCamelCase ,expected_max_diff=1E-2 )
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def _UpperCAmelCase (self ) -> int:
        """Intentionally skipped (batching unsupported)."""
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Intentionally skipped (batching unsupported)."""
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """Intentionally skipped (`num_images_per_prompt` unsupported)."""
        pass
    def _UpperCAmelCase (self ) -> List[str]:
        """Delegate to the mixin's progress-bar test."""
        return super().test_progress_bar()
@slow
@skip_mps
class __lowercase ( unittest.TestCase ):
    """Slow CUDA integration tests against damo-vilab/text-to-video-ms-1.7b.

    NOTE(review): obfuscated block — both tests share the name
    ``_UpperCAmelCase`` (the second shadows the first), and locals such as the
    pipeline, prompt and expected video were all rebound to ``__lowercase``
    while later lines read the original names (``pipe``, ``video_frames``,
    ``expected_video``, ``_lowerCamelCase``).  Restore identifiers to run.
    """
    def _UpperCAmelCase (self ) -> Dict:
        """25-step run with DPM-Solver; mean abs diff vs. reference < 5e-2."""
        __lowercase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        __lowercase = pipe.to('''cuda''' )
        __lowercase = '''Spiderman is surfing'''
        __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __lowercase = pipe(_lowerCamelCase ,generator=_lowerCamelCase ,num_inference_steps=25 ,output_type='''pt''' ).frames
        __lowercase = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
    def _UpperCAmelCase (self ) -> List[str]:
        """2-step run with the default scheduler; same 5e-2 tolerance."""
        __lowercase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        __lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        __lowercase = pipe.to('''cuda''' )
        __lowercase = '''Spiderman is surfing'''
        __lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __lowercase = pipe(_lowerCamelCase ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''pt''' ).frames
        __lowercase = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 56
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Prefix inserted before lines that need human review (conflict-marker style).
# NOTE(review): obfuscation rebound all four constants below to one throwaway
# name; the converter class further down reads HIGHLIGHT_MESSAGE_PRE,
# HIGHLIGHT_MESSAGE_POST, TO_HIGHLIGHT and TO_CONVERT — restore those names
# before use.
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
# Closing conflict-marker appended after a highlighted line.
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
# Identifiers whose presence flags a line for manual review.
_SCREAMING_SNAKE_CASE = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Regex (pattern, replacement) pairs, applied in order, that port tfds
# idioms to the datasets library.
_SCREAMING_SNAKE_CASE = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase(lowerCamelCase_: Namespace):
    """Factory used by the CLI parser to build a ConvertCommand from parsed args.

    BUG FIX: the original body read the undefined global name ``args`` instead
    of its own parameter.
    """
    return ConvertCommand(lowerCamelCase_.tfds_path, lowerCamelCase_.datasets_directory)
class __lowercase ( lowerCAmelCase__ ):
    """CLI command that ports a TensorFlow Datasets script to HuggingFace Datasets.

    NOTE(review): machine-obfuscated block.  The base class ``lowerCAmelCase__``
    is undefined, the two non-init methods share the name ``_UpperCAmelCase``
    (originally ``register_subcommand`` and ``run``), ``__init__`` declares
    duplicate parameter names (a SyntaxError as written), and the constants it
    reads (HIGHLIGHT_MESSAGE_PRE, TO_HIGHLIGHT, TO_CONVERT) were all rebound to
    a single throwaway name above.  Names must be restored before use.
    """
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ) -> Any:
        """Attach the `convert` sub-command and its arguments to the root parser.

        NOTE(review): ``parser``/``train_parser`` below are the original local
        names; this method's parameter was renamed by the obfuscation.
        """
        __lowercase = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=_lowerCamelCase )
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
        """Store the source tfds path and the destination datasets directory.

        NOTE(review): duplicate parameter names make this def a SyntaxError —
        originally (self, tfds_path, datasets_directory, *args).
        """
        __lowercase = get_logger('''datasets-cli/converting''' )
        __lowercase = tfds_path
        __lowercase = datasets_directory
    def _UpperCAmelCase (self ) -> str:
        """Convert every eligible .py file under the tfds path (originally ``run``).

        Rewrites tfds idioms into datasets idioms line by line, creates one
        output directory per dataset builder, and copies shared utility files
        next to the builder that imports them.
        """
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        __lowercase = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.listdir(_lowerCamelCase )
        else:
            __lowercase = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}" )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
                __lowercase = f.readlines()
            __lowercase = []
            __lowercase = False
            __lowercase = False
            __lowercase = []
            for line in lines:
                __lowercase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __lowercase = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    __lowercase = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    __lowercase = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    __lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Keep the line but bracket it with conflict-style markers
                    # so a human reviews it.
                    __lowercase = True
                    __lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
                    out_lines.append(_lowerCamelCase )
                    out_lines.append(_lowerCamelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        __lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    # NOTE(review): re.match may return None here; the original
                    # relied on these import lines always matching — confirm.
                    __lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    __lowercase = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __lowercase = True
                out_lines.append(_lowerCamelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __lowercase = f_name.replace('''.py''' ,'''''' )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(_lowerCamelCase )
            if needs_manual_update:
                with_manual_update.append(_lowerCamelCase )
            with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(_lowerCamelCase )
            self._logger.info(f"Converted in {output_file}" )
        for utils_file in utils_files:
            try:
                # Move each shared utility file next to the builder importing it.
                __lowercase = os.path.basename(_lowerCamelCase )
                __lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f"Moving {dest_folder} to {utils_file}" )
                shutil.copy(_lowerCamelCase ,_lowerCamelCase )
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)

# BUG FIX: the obfuscated original bound every sub-module list to the same
# throwaway name and then passed a never-defined `_import_structure` to
# _LazyModule (a NameError at import time).  Rebuild the canonical
# transformers lazy-import structure: module name -> exported symbols,
# gated on the optional backends actually being installed.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule that imports on first attribute access.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Canonical pattern: replace this module with the lazy proxy (the
    # original assigned the proxy to a throwaway name, which has no effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# BUG FIX: these module-level names are read further down in the file
# (`logger`, `MODEL_CONFIG_CLASSES`) but the obfuscated version rebound a
# single throwaway name three times; restore the canonical bindings.
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
    """Arguments selecting the model/config/tokenizer to fine-tune or train from scratch.

    NOTE(review): obfuscated — every field is named ``a`` (later fields shadow
    earlier ones) and the defaults reference the undefined ``lowerCAmelCase__``
    (originally ``None`` / the MODEL_TYPES list).  Original field names:
    model_name_or_path, model_type, config_name, tokenizer_name, cache_dir.
    """
    # model_name_or_path (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    # model_type (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
    # config_name (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    # tokenizer_name (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    # cache_dir (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
    """Arguments describing the training/eval data and masking strategy.

    NOTE(review): obfuscated — every field is named ``a`` and several defaults
    reference the undefined ``lowerCAmelCase__`` (originally ``None``/``False``).
    Original field names, in order: train_data_file, train_data_files,
    eval_data_file, train_ref_file, eval_ref_file, line_by_line, mlm,
    whole_word_mask, mlm_probability, plm_probability, max_span_length,
    block_size, overwrite_cache.
    """
    # train_data_file (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
    # train_data_files (glob; originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    # eval_data_file (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    # train_ref_file (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    # eval_ref_file (originally)
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    # line_by_line (originally)
    a : bool = field(
        default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    # mlm (originally)
    a : bool = field(
        default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    # whole_word_mask (originally)
    a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
    # mlm_probability (originally)
    a : float = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    # plm_probability (originally)
    a : float = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    # max_span_length (originally)
    a : int = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    # block_size (originally)
    a : int = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    # overwrite_cache (originally)
    a : bool = field(
        default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train/eval dataset described by *args* (originally ``get_dataset``).

    Args:
        args: data arguments (file paths, block size, masking flags).
        tokenizer: tokenizer used to encode the text files.
        evaluate: build the evaluation dataset instead of the training one.
        cache_dir: optional cache directory for TextDataset features.

    BUG FIX: the obfuscated original declared all four parameters with the
    same name (a SyntaxError) and iterated the glob'd train files without
    using the loop variable; parameter names are restored from the keyword
    call sites in ``main`` (tokenizer= / evaluate= / cache_dir=).
    """
    def _dataset(file_path, ref_path=None):
        # One file -> one dataset, honoring the line_by_line / ref-file modes.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )


# Restore the name `main` reads this helper under.
get_dataset = _lowerCAmelCase
def _lowerCAmelCase():
    """Train and/or evaluate a language model (script entry point).

    Parses (ModelArguments, DataTrainingArguments, TrainingArguments), loads
    config/tokenizer/model, builds datasets and the matching data collator,
    trains with Trainer, then optionally evaluates perplexity.

    Returns:
        dict: evaluation results (``{"perplexity": ...}``) or an empty dict.

    BUG FIX: the obfuscated original assigned every local to one throwaway
    name while later lines read the real names (``training_args``, ``model``,
    ``tokenizer``, ...); those locals are restored below.  It also read the
    nonexistent attribute ``training_args.fpaa`` (originally ``fp16``).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it,and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            '''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )

    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # Pick the collator matching the model family / masking strategy.
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
    return results


# Restore the conventional entry-point name used by the __main__ guard and
# the xla_spawn wrapper below.
main = _lowerCAmelCase
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    """Multiprocessing entry point for xla_spawn (TPUs); ignores its index argument."""
    # For xla_spawn (TPUs)
    # NOTE(review): `main` is the training entry point defined above, which the
    # obfuscation renamed to `_lowerCAmelCase`; as written, both calls below
    # raise NameError until that binding is restored.
    main()
if __name__ == "__main__":
    main()
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def _lowerCAmelCase ( lowerCamelCase_ : str ):
return choice(lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ):
__lowercase = random_pivot(lowerCamelCase_ )
# partition based on pivot
# linear time
__lowercase = [e for e in lst if e < pivot]
__lowercase = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCamelCase_ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCamelCase_ ) < k - 1:
return kth_number(lowerCamelCase_ , k - len(lowerCamelCase_ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# BUG FIX: the obfuscated original bound the modeling list to a throwaway
# name and then passed a never-defined `_import_structure` to _LazyModule
# (a NameError at import time).  Rebuild the canonical lazy-import mapping.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule that imports on first attribute access.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Canonical pattern: replace this module with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
| 1
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Deprecated feature-extractor shim that forwards everything to GLPNImageProcessor.

    BUG FIX: the obfuscated original declared *args and **kwargs with the same
    name (a SyntaxError) and passed that kwargs name where a warning category
    belongs; the canonical deprecation pattern passes FutureWarning.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn once at construction, then behave exactly like the image processor.
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56
| 1
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Configuration for a Masked BERT model (standard BERT config plus pruning fields).

    BUG FIX: the obfuscated original declared every __init__ parameter with the
    same name (a SyntaxError); the names below are reconstructed from the
    attribute assignments in the body, with the canonical BERT defaults, and
    the `model_type` class attribute (obfuscated to ``a``) is restored.
    """

    # Identifier used by the transformers config registry.
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # Standard BERT hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Movement-pruning specific fields.
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
    '''Boruvka-style minimum-spanning-tree computation over an edge list.

    NOTE(review): machine-mangled code.  Several methods declare multiple
    parameters all named ``_lowerCamelCase`` (a SyntaxError), and the many
    ``__lowercase = ...`` assignments appear to have replaced attribute or
    element stores (e.g. ``self.m_component[u_node] = ...``).  The comments
    below describe the apparent intent only; confirm against the original.
    '''
    def __init__(self ,_lowerCamelCase ) -> None:
        '''Create a graph with the given node count and no edges.

        NOTE(review): body reads ``num_of_nodes`` which is undefined (the
        parameter was renamed); the three stores were presumably
        ``self.m_num_of_nodes`` / ``self.m_edges`` / ``self.m_component``.
        '''
        __lowercase = num_of_nodes
        __lowercase = []
        __lowercase = {}
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
        '''Add an undirected weighted edge [u_node, v_node, weight].'''
        self.m_edges.append([u_node, v_node, weight] )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
        '''Recursively find the representative component of ``u_node``.'''
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
        '''Path-compress: point every node at its component representative.'''
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                # presumably self.m_component[k] = self.find_component(k)
                __lowercase = self.find_component(_lowerCamelCase )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
        '''Union by size: merge the smaller component into the larger one.'''
        if component_size[u_node] <= component_size[v_node]:
            # presumably self.m_component[u_node] = v_node
            __lowercase = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(_lowerCamelCase )
        elif component_size[u_node] >= component_size[v_node]:
            # presumably self.m_component[v_node] = self.find_component(u_node)
            __lowercase = self.find_component(_lowerCamelCase )
            component_size[u_node] += component_size[v_node]
            self.set_component(_lowerCamelCase )
    def _UpperCAmelCase (self ) -> None:
        '''Boruvka's algorithm: repeatedly add each component's minimum-weight
        outgoing edge until a single component (the MST) remains, printing the
        chosen edges and the total weight.'''
        __lowercase = []
        __lowercase = 0
        __lowercase = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        __lowercase = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                __lowercase , __lowercase , __lowercase = edge
                __lowercase = self.m_component[u]
                __lowercase = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            # presumably minimum_weight_edge[component] = [u, v, w]
                            __lowercase = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(_lowerCamelCase ,_lowerCamelCase ):
                    __lowercase , __lowercase , __lowercase = edge
                    __lowercase = self.m_component[u]
                    __lowercase = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
                        __lowercase = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
    """No-op placeholder; kept only so the module exposes the expected symbol."""
    return None
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the VAN model: heavy submodules are imported only
# on first attribute access via ``_LazyModule``.  Fixes the mangled original,
# which assigned everything to a throwaway name so ``_import_structure`` was an
# undefined reference, the torch-only entry list was discarded, and the
# ``_LazyModule`` instance was never installed into ``sys.modules``.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it only the configuration is exposed.
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
|
'''simple docstring'''
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN (Sequential - building the model layer by layer).
    # Fixes the mangled original: every binding was assigned to a throwaway
    # name, so ``classifier``, ``train_datagen``, ``training_set`` etc. were
    # undefined, and the layer classes ``ConvaD``/``MaxPoolingaD`` do not exist
    # (restored to ``Conv2D``/``MaxPooling2D``).
    classifier = models.Sequential()

    # Step 1 - Convolution.
    # 64x64 is the length & breadth of the dataset images and 3 is the RGB
    # channel count; (3, 3) is the kernel (filter matrix) size.
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 56
| 1
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Marker inserted before a line that needs manual review after conversion.
# Fixes the mangled original, where all four constants were assigned to the
# same throwaway name so ``HIGHLIGHT_MESSAGE_PRE``, ``TO_HIGHLIGHT`` and
# ``TO_CONVERT`` (used by the command class below) were undefined.
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''

# Closing marker written after a highlighted block.
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''

# TFDS constructs with no automatic datasets equivalent: lines mentioning any
# of these get wrapped in the highlight markers for manual follow-up.
TO_HIGHLIGHT = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

# (pattern, replacement) regex pairs applied in order; order matters because
# later, more general patterns (e.g. ``tfds\.``) would otherwise shadow the
# specific ones above them.
TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
    """Argparse factory: build a :class:`ConvertCommand` from the parsed CLI namespace.

    Fixes a NameError in the original, whose body read an undefined global
    ``args`` instead of its own parameter.
    """
    return ConvertCommand(lowerCamelCase_.tfds_path , lowerCamelCase_.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
    '''CLI command that rewrites a TensorFlow Datasets script into a HuggingFace
    Datasets script by regex substitution, highlighting constructs that need
    manual attention.

    NOTE(review): machine-mangled code.  ``__init__`` declares three parameters
    all named ``_lowerCamelCase`` (a SyntaxError), the static registrar reads an
    undefined ``parser``/``train_parser``, and most ``__lowercase = ...`` lines
    replaced real bindings.  Comments describe apparent intent only.
    '''
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ) -> Any:
        '''Register the ``convert`` sub-command on the given argparse sub-parser.

        NOTE(review): ``parser`` is undefined here (the parameter was mangled)
        and the result was presumably bound to ``train_parser``.
        '''
        __lowercase = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=_lowerCamelCase )
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
        '''Store the source tfds path and the destination datasets directory.

        NOTE(review): the three stores were presumably ``self._logger`` /
        ``self._tfds_path`` / ``self._datasets_directory``.
        '''
        __lowercase = get_logger('''datasets-cli/converting''' )
        __lowercase = tfds_path
        __lowercase = datasets_directory
    def _UpperCAmelCase (self ) -> str:
        '''Convert every eligible ``.py`` file under the tfds path: apply the
        ``TO_CONVERT`` regex table line by line, wrap ``TO_HIGHLIGHT`` hits in
        review markers, write builder scripts into per-dataset directories and
        copy shared utility files next to the builders that import them.'''
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        __lowercase = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.listdir(_lowerCamelCase )
        else:
            __lowercase = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}" )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
                __lowercase = f.readlines()
            __lowercase = []
            __lowercase = False
            __lowercase = False
            __lowercase = []
            for line in lines:
                __lowercase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __lowercase = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    __lowercase = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    __lowercase = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    __lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    __lowercase = True
                    __lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
                    out_lines.append(_lowerCamelCase )
                    out_lines.append(_lowerCamelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        __lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    __lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    __lowercase = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __lowercase = True
                out_lines.append(_lowerCamelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __lowercase = f_name.replace('''.py''' ,'''''' )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(_lowerCamelCase )
            if needs_manual_update:
                with_manual_update.append(_lowerCamelCase )
            with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(_lowerCamelCase )
            self._logger.info(f"Converted in {output_file}" )
        for utils_file in utils_files:
            try:
                __lowercase = os.path.basename(_lowerCamelCase )
                __lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f"Moving {dest_folder} to {utils_file}" )
                shutil.copy(_lowerCamelCase ,_lowerCamelCase )
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3

# Public re-exports of this utility package.  Fixes the mangled original,
# which bound the export list to a throwaway name instead of ``__all__``,
# silently breaking ``from ... import *``.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( input_string: str , key: int ):
    """Encrypt ``input_string`` with the rail-fence (zigzag) cipher of height ``key``.

    Characters are distributed over ``key`` rails in a zigzag and the rails are
    concatenated top to bottom.  Fixes the mangled original, which declared
    both parameters with the same name (a SyntaxError) and joined the wrong
    variable when flattening the grid.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid = [[] for _ in range(key )]  # one bucket per rail
    lowest = key - 1
    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def _lowerCAmelCase ( input_string: str , key: int ):
    """Decrypt a rail-fence (zigzag) ciphertext of height ``key``.

    Rebuilds the rail template to learn each rail's length, slices the
    ciphertext into rails, then reads them back in zigzag order.  Fixes the
    mangled original, which declared both parameters with the same name (a
    SyntaxError) and sliced the rails with the wrong lengths.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('''*''' )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    """Try every rail-fence key from 1 to ``len - 1`` and map key -> decryption.

    Fixes the mangled original, which never built the ``results`` dict it
    returned and called ``decrypt`` with the ciphertext as both arguments.

    NOTE(review): ``decrypt`` must resolve at module level to the rail-fence
    decrypt routine; in this mangled file that function was also renamed, so
    the real name needs restoring there too.
    """
    results = {}
    for key_guess in range(1 , len(lowerCamelCase_ ) ):  # tries every key
        results[key_guess] = decrypt(lowerCamelCase_ , key_guess )
    return results
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Resource maps referenced by the tokenizer class below.  Fixes the mangled
# original, which assigned all of these to the same throwaway name, leaving
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES undefined at class-creation time.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

# Maximum input lengths (positions) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class __lowercase ( lowerCAmelCase__ ):
    '''Fast (tokenizers-backed) GPT-NeoX tokenizer wrapper.

    NOTE(review): machine-mangled code.  The four class attributes are all
    named ``a`` (each overwrites the previous), and ``__init__`` declares many
    parameters all called ``_lowerCamelCase`` -- a SyntaxError.  Intent is
    described from the surviving right-hand sides only.
    '''
    a : List[Any] = VOCAB_FILES_NAMES
    a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a : List[str] = ["input_ids", "attention_mask"]
    def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
        '''Initialise the fast tokenizer and sync ``add_prefix_space`` into the
        backend pre-tokenizer state when it differs from the requested value.'''
        super().__init__(
            _lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
        __lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
            __lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
            __lowercase = add_prefix_space
            __lowercase = pre_tok_class(**_lowerCamelCase )
        __lowercase = add_prefix_space
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
        '''Save the backend tokenizer model files into the given directory and
        return the written file paths.'''
        __lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
        return tuple(_lowerCamelCase )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
        '''Encode a Conversation turn by turn, appending ``eos`` after each text
        and truncating from the left to ``model_max_length``.'''
        __lowercase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
        if len(_lowerCamelCase ) > self.model_max_length:
            __lowercase = input_ids[-self.model_max_length :]
        return input_ids
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    """Return every variant of the text with exactly one alphabetic character upper-cased.

    Non-alphabetic positions produce no variant; the result preserves
    left-to-right position order.
    """
    variants = []
    for index, char in enumerate(lowerCamelCase_):
        if char.isalpha():
            variants.append(lowerCamelCase_[:index] + char.upper() + lowerCamelCase_[index + 1 :])
    return variants
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    __import__('''doctest''').testmod()
| 56
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Metric metadata strings consumed by the GLUE Metric class below.  Fixes the
# mangled original, which assigned all three to the same throwaway name so
# ``_CITATION`` / ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` were undefined at
# class-decoration time.
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''

_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''

_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:
    >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\')  # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0}
    >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\')  # \'mrpc\' or \'qqp\'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0, \'f1\': 1.0}
    >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {\'pearson\': 1.0, \'spearmanr\': 1.0}
    >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( preds , labels ):
    """Fraction of predictions equal to the labels (expects array-likes that
    support elementwise ``==`` and ``.mean()``, e.g. numpy arrays).

    Fixes the mangled original, which declared both parameters with the same
    name -- a SyntaxError.
    """
    return float((preds == labels).mean() )
def _lowerCAmelCase ( preds , labels ):
    """Return accuracy together with binary F1 as a dict.

    Fixes the mangled original, which declared both parameters with the same
    name (a SyntaxError) and passed the same argument for both F1 inputs.

    NOTE(review): ``simple_accuracy`` and ``fa_score`` (sklearn's ``f1_score``
    before mangling) must resolve at module level.
    """
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def _lowerCAmelCase ( preds , labels ):
    """Pearson and Spearman correlation between predictions and labels.

    Fixes the mangled original, which declared both parameters with the same
    name -- a SyntaxError.
    """
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    '''GLUE metric: dispatches per configured subset to Matthews correlation
    (cola), Pearson/Spearman (stsb), accuracy+F1 (mrpc/qqp) or plain accuracy
    (all remaining subsets).

    Fixes in this revision: the compute method declared both data parameters
    with the same name (a SyntaxError) and annotated its return as the
    undefined ``Dict``; both are repaired without changing the contract.

    NOTE(review): ``simple_accuracy``, ``acc_and_fa``, ``pearson_and_spearman``
    and ``matthews_corrcoef`` must resolve at module level (the first three
    were also renamed by the mangling).
    '''
    def _UpperCAmelCase (self ) -> "datasets.MetricInfo":
        '''Validate the configuration name and build the MetricInfo (int64
        inputs for classification subsets, float32 for the stsb regression).'''
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                } ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
    def _UpperCAmelCase (self ,predictions ,references ) -> dict:
        '''Compute the metric(s) for the configured GLUE subset.'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references ,predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions ,references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions ,references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions ,references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    '''Deprecated alias kept for backward compatibility: warns, then behaves
    exactly like ``GLPNImageProcessor``.'''
    def __init__(self , *args , **kwargs ) -> None:
        '''Emit the deprecation warning and defer to the base image processor.

        Fixes the mangled original, which declared ``*`` and ``**`` parameters
        with the same name (a SyntaxError) and passed that name to
        ``warnings.warn`` as the category.  ``FutureWarning`` is the
        conventional category for this deprecation -- confirm against the
        unmangled source.
        '''
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 56
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the target name below is machine-mangled; the unmangled file
# presumably bound this to ``logger`` (the binding is never read here).
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
    '''Load a fairseq OPT checkpoint and normalise its state-dict keys: drop
    unused weights, rename projection/layer-norm keys, and split fused
    ``qkv_proj`` weights into separate q/k/v entries.

    NOTE(review): machine-mangled code.  Almost every binding below was
    rewritten to ``__lowercase = ...`` (e.g. ``sd``, ``keys_to_delete``,
    ``keys_to_rename``, ``sd[new_key]``, ``sd[q_name]`` ...), so names the
    body reads are undefined and several stores are lost; the inline notes
    describe apparent intent only.
    '''
    __lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
    if "model" in sd.keys():
        __lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    __lowercase = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            # NOTE(review): presumably sd.pop(key), not the checkpoint path
            sd.pop(lowerCamelCase_ )
    __lowercase = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            # presumably sd[new_key] = sd.pop(old_key)
            __lowercase = sd.pop(lowerCamelCase_ )
    __lowercase = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            __lowercase = sd[key]
            # We split QKV in separate Q,K,V
            __lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            __lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            __lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            __lowercase = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            __lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
            __lowercase = q
            __lowercase = k
            __lowercase = v
            del sd[key]
    return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
    '''Convert a fairseq OPT checkpoint into a HuggingFace OPTModel and save it.

    NOTE(review): machine-mangled.  All three parameters share one name (a
    SyntaxError -- presumably checkpoint_path / pytorch_dump_folder_path /
    config), and the ``__lowercase`` bindings hide ``state_dict``, ``config``
    and ``model``; every later use of ``lowerCamelCase_`` therefore points at
    the wrong value.
    '''
    __lowercase = load_checkpoint(lowerCamelCase_ )
    if config is not None:
        __lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
    else:
        __lowercase = OPTConfig()
    __lowercase = OPTModel(lowerCamelCase_ ).half().eval()
    model.load_state_dict(lowerCamelCase_ )
    # Check results
    Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
    model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
    # Fixes the mangled original, which bound the parser and the parsed args to
    # a throwaway name, so ``parser`` and ``args`` below were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    # NOTE(review): the conversion function was renamed to ``_lowerCAmelCase``
    # by the mangling; ``convert_opt_checkpoint`` must be restored for this
    # call to resolve.
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for PoolFormer.  Fixes the mangled original, which
# assigned everything to a throwaway name: ``_import_structure`` was an
# undefined reference, the optional vision/torch entry lists were discarded,
# and the ``_LazyModule`` instance was never installed into ``sys.modules``.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision deps are optional: skip the image-processor entries without them.
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 700
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
    '''Fast CPU tests for the VQDiffusion pipeline built from tiny dummy models.

    NOTE(review): machine-mangled code.  All methods/properties share the name
    ``_UpperCAmelCase`` (later defs overwrite earlier ones), and the
    ``__lowercase = ...`` bindings hide the names the bodies later read
    (``model``, ``pipe``, ``output``, ``image`` ...).  Docstrings describe
    apparent intent only.
    '''
    def _UpperCAmelCase (self ) -> List[Any]:
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Presumably ``num_embed`` in the unmangled source.'''
        return 12
    @property
    def _UpperCAmelCase (self ) -> List[Any]:
        '''Presumably ``num_embeds_ada_norm``.'''
        return 12
    @property
    def _UpperCAmelCase (self ) -> List[Any]:
        '''Presumably ``text_embedder_hidden_size``.'''
        return 32
    @property
    def _UpperCAmelCase (self ) -> List[str]:
        '''Build a tiny deterministic VQModel (presumably ``dummy_vqvae``).'''
        torch.manual_seed(0 )
        __lowercase = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
        return model
    @property
    def _UpperCAmelCase (self ) -> int:
        '''Tiny CLIP tokenizer (presumably ``dummy_tokenizer``).'''
        __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    @property
    def _UpperCAmelCase (self ) -> Tuple:
        '''Tiny deterministic CLIP text encoder (presumably ``dummy_text_encoder``).'''
        torch.manual_seed(0 )
        __lowercase = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(_lowerCamelCase )
    @property
    def _UpperCAmelCase (self ) -> Dict:
        '''Tiny deterministic Transformer2DModel (presumably ``dummy_transformer``).'''
        torch.manual_seed(0 )
        __lowercase = 12
        __lowercase = 12
        __lowercase = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        __lowercase = TransformeraDModel(**_lowerCamelCase )
        return model
    def _UpperCAmelCase (self ) -> Optional[Any]:
        '''End-to-end CPU run without learned classifier-free sampling
        embeddings; checks output shape and a fixed numeric slice.'''
        __lowercase = '''cpu'''
        __lowercase = self.dummy_vqvae
        __lowercase = self.dummy_text_encoder
        __lowercase = self.dummy_tokenizer
        __lowercase = self.dummy_transformer
        __lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
        __lowercase = VQDiffusionPipeline(
            vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
        __lowercase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def _UpperCAmelCase (self ) -> Any:
        '''Same end-to-end run but with learnable classifier-free sampling
        embeddings sized to the tokenizer context length.'''
        __lowercase = '''cpu'''
        __lowercase = self.dummy_vqvae
        __lowercase = self.dummy_text_encoder
        __lowercase = self.dummy_tokenizer
        __lowercase = self.dummy_transformer
        __lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(
            learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
        __lowercase = VQDiffusionPipeline(
            vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
        __lowercase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    '''Slow GPU integration test for the released microsoft/vq-diffusion-ithq
    pipeline, compared against a reference image.

    NOTE(review): machine-mangled -- the ``__lowercase = ...`` bindings hide
    ``expected_image``, ``pipeline``, ``output`` and ``image`` that the body
    reads below.
    '''
    def _UpperCAmelCase (self ) -> Dict:
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase (self ) -> str:
        '''Generate one image from the pretrained pipeline and compare it with
        the stored reference output within a loose tolerance.'''
        __lowercase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        __lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        __lowercase = pipeline.to(_lowerCamelCase )
        pipeline.set_progress_bar_config(disable=_lowerCamelCase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipeline(
            '''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
        __lowercase = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 56
| 0
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 701
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
    """SentencePiece-style Unigram tokenizer built on the HF ``tokenizers``
    library: NMT+NFKC+lowercase normalization, Metaspace/digits/punctuation
    pre-tokenization, and an EOS-appending template post-processor.

    NOTE(review): this file is mangled — ``__init__`` and both training
    methods declare several parameters with the same name (a SyntaxError),
    and every local binds to ``__lowercase`` while later code consumes the
    original names (``pad_token``, ``tokenizer``, ``replacement``,
    ``self.special_tokens`` ...). Verify against the original
    ``SentencePieceUnigramTokenizer`` implementation before relying on it.
    """
    def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
        """Build the underlying ``Tokenizer`` (Unigram model) and register it
        with ``BaseTokenizer`` together with its construction parameters."""
        # Fixed ids for the special tokens: pad=0, eos=1, unk=2.
        __lowercase = {
            '''pad''': {'''id''': 0, '''token''': pad_token},
            '''eos''': {'''id''': 1, '''token''': eos_token},
            '''unk''': {'''id''': 2, '''token''': unk_token},
        }
        # Position-indexed list of special tokens, ordered by their ids.
        __lowercase = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            __lowercase = token_dict['''token''']
        __lowercase = Tokenizer(Unigram() )
        # Normalize NMT artifacts, apply NFKC, collapse runs of spaces, lowercase.
        __lowercase = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
                normalizers.Lowercase(),
            ] )
        # Metaspace word-boundary marking, per-digit splitting, punctuation splitting.
        __lowercase = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
                pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
                pre_tokenizers.Punctuation(),
            ] )
        __lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
        # Append the EOS token to every encoded single sequence.
        __lowercase = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
        __lowercase = {
            '''model''': '''SentencePieceUnigram''',
            '''replacement''': replacement,
            '''add_prefix_space''': add_prefix_space,
        }
        super().__init__(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
        """Train the Unigram model from one or more files, then patch in the
        unk id (``add_unk_id``-style method below)."""
        __lowercase = trainers.UnigramTrainer(
            vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
        # Accept a single path as well as a list of paths.
        if isinstance(_lowerCamelCase ,_lowerCamelCase ):
            __lowercase = [files]
        self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
        self.add_unk_id()
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
        """Train the Unigram model from an iterator of texts, then patch in
        the unk id."""
        __lowercase = trainers.UnigramTrainer(
            vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
        self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
        self.add_unk_id()
    def _UpperCAmelCase (self ) -> Tuple:
        """Serialize the trained tokenizer, set ``model.unk_id`` to the unk
        special-token id, and reload it (Unigram needs unk_id post-training)."""
        __lowercase = json.loads(self._tokenizer.to_str() )
        __lowercase = self.special_tokens['''unk''']['''id''']
        __lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
| 56
| 0
|
from functools import lru_cache
@lru_cache
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 702
|
"""Lazy import structure for the XGLM model subpackage."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports. The mangled original
# repeatedly reassigned a single module global and then referenced an
# undefined `_import_structure`; this restores the keyed-update pattern
# (consistent with the other model subpackage __init__ files).
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access (this is why `sys` is imported).
    sys.modules[__name__] = _LazyModule(
        __name__, globals()['''__file__'''], _import_structure, module_spec=__spec__
    )
| 0
|
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
_SCREAMING_SNAKE_CASE = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowerCAmelCase ( query: str = "dhaka" , max_images: int = 5 ) -> int:
    """Download up to ``max_images`` full-resolution Google Images results for
    ``query`` into a ``query_<term>`` directory and return how many were saved.

    Fixes the mangled original, which declared two parameters with the same
    name (a SyntaxError) and referenced undefined names (``query``,
    ``max_images``, the module-level header dict) throughout the body.
    """
    max_images = min(max_images , 5_0 )  # Prevent abuse!
    params = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }
    # `_SCREAMING_SNAKE_CASE` is the module-level browser-header dict.
    html = requests.get('''https://www.google.com/search''' , params=params , headers=_SCREAMING_SNAKE_CASE )
    soup = BeautifulSoup(html.text , '''html.parser''' )
    # The image metadata lives in inline AF_initDataCallback script payloads.
    matched_images_data = ''''''.join(
        re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
    # Round-trip through JSON to unescape the embedded payload.
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )
    matched_google_image_data = re.findall(
        r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , matched_images_data_json , )
    if not matched_google_image_data:
        return 0
    # Drop the low-resolution gstatic thumbnails, keep full-resolution URLs.
    removed_matched_google_images_thumbnails = re.sub(
        r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(matched_google_image_data ) , )
    matched_google_full_resolution_images = re.findall(
        r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , removed_matched_google_images_thumbnails , )
    index = 0
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        # URLs are double-escaped in the payload; decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image , '''ascii''' ).decode(
            '''unicode-escape''' )
        original_size_img = bytes(original_size_img_not_fixed , '''ascii''' ).decode(
            '''unicode-escape''' )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(opener )
        path_name = f"query_{query.replace(' ' , '_')}"
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , f"{path_name}/original_size_img_{index}.jpg" )
    return index
if __name__ == "__main__":
    try:
        # The downloader above is bound to the mangled name `_lowerCAmelCase`;
        # the original called an undefined `download_images_from_google_query`.
        image_count = _lowerCAmelCase(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print('''Please provide a search term.''')
        raise
| 703
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used to post doc-test reports; token comes from the CI environment.
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
    """Builds and posts the Slack message summarizing doc-test results.

    NOTE(review): assignment targets in this file appear mangled (every local
    and attribute binds to ``__lowercase``), so attributes read later
    (``self.title``, ``self.n_failures``, ``self._time_spent`` ...) have no
    visible definition — verify against the original notification script.
    """
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
        """Capture the message title and aggregate counts from the results dict."""
        __lowercase = title
        __lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
        __lowercase = doc_test_results['''success''']
        __lowercase = doc_test_results['''failures''']
        __lowercase = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        __lowercase = doc_test_results
    @property
    def _UpperCAmelCase (self ) -> str:
        """Total wall-clock time across runs, formatted as ``XhYmZs``."""
        __lowercase = [self._time_spent]
        __lowercase = 0
        for time in time_spent:
            __lowercase = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_lowerCamelCase ) == 1:
                __lowercase = [0, 0, time_parts[0]]
            __lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        __lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack header block carrying the message title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block used when every test passed."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block summarizing failure/total counts."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack block listing failed examples grouped by category."""
        __lowercase = 40
        __lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
        __lowercase = ''''''
        for category, failures in category_failures.items():
            if len(_lowerCamelCase ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_lowerCamelCase )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def _UpperCAmelCase (self ) -> str:
        """JSON-encoded list of blocks forming the message payload."""
        __lowercase = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_lowerCamelCase )
    @staticmethod
    def _UpperCAmelCase () -> List[str]:
        """Post a fallback message when the test run itself crashed."""
        __lowercase = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
    def _UpperCAmelCase (self ) -> Tuple:
        """Post the main summary message to the daily CI Slack channel."""
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        __lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
        __lowercase = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        """Build the reply blocks (header, job link, failure texts) for one job."""
        __lowercase = ''''''
        for key, value in failures.items():
            # Keep individual failure texts within Slack limits.
            __lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        __lowercase = job_name
        __lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            __lowercase = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def _UpperCAmelCase (self ) -> Any:
        """Post per-job failure details as threaded replies to the main message."""
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        __lowercase = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        __lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                __lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
                __lowercase = job_result['''failures''']
                __lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
                # Avoid hitting Slack rate limits between replies.
                time.sleep(1 )
def _lowerCAmelCase ( ):
    """Fetch ``{job_name: html_url}`` for every job of the current GitHub
    Actions run, paging through the API 100 jobs at a time.

    Fixes the mangled original, which referenced undefined locals
    (``run_id``, ``url``, ``jobs``) and printed an undefined name in the
    exception handler.
    """
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        # Remaining pages after the first 100 jobs.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"&page={i + 2}" ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return jobs
    except Exception as e:
        # Best-effort: links are cosmetic, so degrade gracefully.
        print('''Unknown error, could not fetch links.''' , e )
    return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): this driver calls helpers by their original names
    # (get_job_links, retrieve_available_artifacts, handle_test_results,
    # extract_first_line_failure, Message), but in this mangled file the
    # corresponding definitions are all bound to `_lowerCAmelCase` /
    # `__lowercase`, and every assignment target below collapsed into
    # `_SCREAMING_SNAKE_CASE` — verify against the original script.
    _SCREAMING_SNAKE_CASE = get_job_links()
    _SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
    # Category label per file-glob: .py docstrings vs markdown examples.
    _SCREAMING_SNAKE_CASE = collections.OrderedDict(
        [
            ('''*.py''', '''API Examples'''),
            ('''*.md''', '''MD Examples'''),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    _SCREAMING_SNAKE_CASE = {
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    _SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
    _SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    _SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
        _SCREAMING_SNAKE_CASE = failed
        _SCREAMING_SNAKE_CASE = success
        _SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
        _SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
        # Walk the short summary, collecting each FAILED test into its category.
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                _SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
                _SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
                if "::" in line:
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
                else:
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        _SCREAMING_SNAKE_CASE = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        _SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
                        _SCREAMING_SNAKE_CASE = failure
                        break
    _SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
    message.post()
    message.post_reply()
| 0
|
import math
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ):
if (
not isinstance(_lowercase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] ):
if (
not isinstance(_lowercase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
    # Run any embedded doctests when invoked directly.
    from doctest import testmod

    testmod()
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
    """Entry point for the ``diffusers-cli`` tool: parse the subcommand,
    build its service object, and run it.

    Fixes the mangled original, which passed and tested undefined names
    instead of the sub-parser and parsed-args objects.
    """
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    # The entry point above is bound to the mangled name `_lowerCAmelCase`;
    # the original called an undefined `main()`.
    _lowerCAmelCase()
| 56
| 0
|
from datetime import datetime
import requests
def _lowerCAmelCase ( lowerCamelCase_ : str ) -> bytes:
    """Resolve an Instagram/IGTV page URL to its direct video source via the
    downloadgram API and return the raw video bytes.

    Fixes the mangled original, which concatenated an undefined ``url`` and
    fetched an undefined ``_A``.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # First hit resolves the page to a direct media URL.
    video_url = requests.get(base_url + lowerCamelCase_ ).json()[0]["urls"][0]["src"]
    # Second hit downloads the media itself.
    return requests.get(video_url ).content
if __name__ == "__main__":
    # The downloader above is bound to the mangled name `_lowerCAmelCase`;
    # the original called an undefined `download_video(url)`.
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, '''wb''') as fp:
        fp.write(_lowerCAmelCase(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
| 705
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( value , factor=1 , **kwargs ):
    """Search for the next prime starting from ``factor * value``; pass
    ``desc=True`` to search downward instead. If the starting point is
    already prime, recurse from the following number.

    Fixes the mangled original, which referenced undefined locals
    (``factor``, ``value``, ``first_value_val``) and an undefined recursion
    target.

    NOTE(review): relies on a sibling ``is_prime`` helper; in this file the
    primality check above is bound to a mangled name — verify the wiring.
    """
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        # Starting point was already prime: return the *next* prime instead.
        return _lowerCAmelCase(value + 1 , **kwargs )
    return value
| 56
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class __lowercase ( __lowerCamelCase ):
'''simple docstring'''
a : List[str] = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a : List[Any] = Features({"text": Value("string" )} )
a : str = Features({"labels": ClassLabel} )
a : List[Any] = "text"
a : Optional[Any] = "labels"
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] ,UpperCAmelCase_ ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__lowercase = copy.deepcopy(self )
__lowercase = self.label_schema.copy()
__lowercase = features[self.label_column]
__lowercase = label_schema
return task_template
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 706
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ) -> float:
    """Time one max-subarray run over ``lowerCamelCase_`` random integers and
    return the elapsed seconds.

    Fixes the mangled original, which referenced undefined ``input_size``.

    NOTE(review): calls the sibling ``max_subarray``, which is bound to a
    mangled name in this file — verify the wiring.
    """
    arr = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
    start = time.time()
    max_subarray(arr , 0 , lowerCamelCase_ - 1 )
    end = time.time()
    return end - start
def _lowerCAmelCase ( ):
    """Benchmark the max-subarray implementation over growing input sizes,
    print a table, and plot runtime vs input size with matplotlib.

    Fixes the mangled original, which referenced undefined ``input_sizes``.

    NOTE(review): calls the sibling ``time_max_subarray``, which is bound to
    a mangled name in this file — verify the wiring.
    """
    input_sizes = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '''\t\t''' , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel('''Number of Inputs''' )
    plt.ylabel('''Time taken in seconds''' )
    plt.show()
if __name__ == "__main__":
    # Execute embedded doctests when run as a script.
    import doctest

    doctest.testmod()
| 56
| 0
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a : Dict = GPTSanJapaneseTokenizer
a : int = False
a : Union[str, Any] = {'do_clean_text': False, 'add_prefix_space': False}
    def _UpperCAmelCase (self ) -> List[Any]:
        """setUp-style fixture: write a tiny vocab file and emoji table into
        ``tmpdirname`` for GPTSanJapaneseTokenizer tests.

        NOTE(review): locals are mangled (all bind to ``__lowercase``);
        ``vocab_tokens``, ``self.vocab_file``/``self.emoji_file`` and the
        ``_lowercase`` passed to ``json.dumps`` have no visible binding —
        verify against the original test file.
        """
        super().setUp()
        # fmt: off
        __lowercase = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        __lowercase = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
        __lowercase = {'unk_token': '<unk>'}
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''emoji_file'''] )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.emoji_file ,'''w''' ) as emoji_writer:
            emoji_writer.write(json.dumps(_lowercase ) )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname ,**_lowercase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
__lowercase = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
        """get_clean_sequence-style helper: encode/decode the fixture text and
        return ``(text, ids)``.

        NOTE(review): locals are mangled; ``tokenizer``, ``text`` and ``ids``
        used below have no visible binding — verify against the original.
        """
        __lowercase = self.get_input_output_texts(_lowercase )
        __lowercase = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
        __lowercase = tokenizer.decode(_lowercase ,clean_up_tokenization_spaces=_lowercase )
        return text, ids
    def _UpperCAmelCase (self ) -> Tuple:
        """Intentionally skipped common-tokenizer test; see TODO."""
        pass # TODO add if relevant
    def _UpperCAmelCase (self ) -> int:
        """Intentionally skipped common-tokenizer test; see TODO."""
        pass # TODO add if relevant
    def _UpperCAmelCase (self ) -> int:
        """Intentionally skipped common-tokenizer test; see TODO."""
        pass # TODO add if relevant
    def _UpperCAmelCase (self ) -> List[str]:
        """Full-tokenizer test: tokenization, ids without special tokens, and
        ids with the unk token appended.

        NOTE(review): locals are mangled; names like ``tokenizer``,
        ``input_tokens``/``tokens`` and the id lists consumed by the
        assertions have no visible binding — verify against the original.
        """
        __lowercase = self.get_tokenizer()
        # Testing tokenization
        __lowercase = 'こんにちは、世界。 こんばんは、㔺界。'
        __lowercase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        __lowercase = tokenizer.tokenize(_lowercase )
        self.assertListEqual(_lowercase ,_lowercase )
        # Testing conversion to ids without special tokens
        __lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __lowercase = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase ,_lowercase )
        # Testing conversion to ids with special tokens
        __lowercase = tokens + [tokenizer.unk_token]
        __lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __lowercase = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase ,_lowercase )
    def _UpperCAmelCase (self ) -> List[str]:
        """Token-bagging test: '<|bagoftoken|>' should expand so the decoded
        text matches the expanded reference string.

        NOTE(review): locals are mangled; ``tokenizer`` and the encode/decode
        results consumed by the assertion have no visible binding.
        """
        __lowercase = self.get_tokenizer()
        # Testing tokenization
        __lowercase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        __lowercase = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        __lowercase = tokenizer.encode(_lowercase )
        __lowercase = tokenizer.decode(_lowercase )
        self.assertEqual(_lowercase ,_lowercase )
@slow
def _UpperCAmelCase (self ) -> Optional[Any]:
    """All three prefix styles decode to the same concatenated text.

    Fix: restored the distinct locals (``prefix_text``/``input_text`` are
    proven by the surviving ``prefix_text + input_text`` expression); the
    original collapsed the three encodings and decodings onto one name.
    """
    tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
    # Testing tokenization
    prefix_text = 'こんにちは、世界。'
    input_text = 'こんばんは、㔺界。😀'
    expected_text = 'こんにちは、世界。こんばんは、世界。😀'
    x_token = tokenizer.encode(prefix_text + input_text)
    x_token_a = tokenizer.encode('', prefix_text=prefix_text + input_text)
    x_token_b = tokenizer.encode(input_text, prefix_text=prefix_text)
    decoded = tokenizer.decode(x_token)
    decoded_a = tokenizer.decode(x_token_a)
    decoded_b = tokenizer.decode(x_token_b)
    self.assertEqual(decoded, expected_text)
    self.assertEqual(decoded_a, expected_text)
    self.assertEqual(decoded_b, expected_text)
@slow
def _UpperCAmelCase (self ) -> Union[str, Any]:
    """``token_type_ids`` mark the prefix segment for the three calling styles.

    Fix: restored distinct locals; the original collapsed the three expected
    masks and the three ``token_type_ids`` results onto one throwaway name, so
    the assertions compared placeholders.
    """
    tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
    # Testing tokenization
    prefix_text = 'こんにちは、世界。'
    input_text = 'こんばんは、㔺界。😀'
    # -2: drop the special tokens added by encode() when measuring lengths.
    len_prefix = len(tokenizer.encode(prefix_text)) - 2
    len_text = len(tokenizer.encode(input_text)) - 2
    expected_mask = [1] + [0] * (len_prefix + len_text + 1)
    expected_mask_a = [1] * (len_prefix + len_text + 1) + [0]
    expected_mask_b = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
    type_ids = tokenizer(prefix_text + input_text).token_type_ids
    type_ids_a = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
    type_ids_b = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
    self.assertListEqual(type_ids, expected_mask)
    self.assertListEqual(type_ids_a, expected_mask_a)
    self.assertListEqual(type_ids_b, expected_mask_b)
@slow
def _UpperCAmelCase (self ) -> List[str]:
    """Prefix handling inserts a SEG token but decodes to the same text.

    Fix: the original collapsed three encodings onto ambiguous names so the
    last two assertions compared ``x_token_a`` with itself; restored the three
    distinct encodings.
    """
    tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
    x_token = tokenizer.encode('あンいワ')
    x_token_a = tokenizer.encode('', prefix_text='あンいワ')
    x_token_b = tokenizer.encode('いワ', prefix_text='あン')
    self.assertEqual(tokenizer.decode(x_token), tokenizer.decode(x_token_a))
    self.assertEqual(tokenizer.decode(x_token), tokenizer.decode(x_token_b))
    self.assertNotEqual(x_token, x_token_a)
    self.assertNotEqual(x_token, x_token_b)
    self.assertEqual(x_token_a[1], x_token_a[-1])  # SEG token
    self.assertEqual(x_token_a[1], x_token_b[3])  # SEG token
@slow
def _UpperCAmelCase (self ) -> str:
    """Batched (prefix, text) pairs yield padded ids/type-ids/attention masks,
    identically via ``__call__`` and ``batch_encode_plus``.

    Fix: the original collapsed both encodings and all three expectation lists
    onto one throwaway name, so the assertions compared placeholders.
    """
    tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
    input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
    x_token = tokenizer(input_pairs, padding=True)
    x_token_a = tokenizer.batch_encode_plus(input_pairs, padding=True)
    # fmt: off
    expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
    expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
    expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
    # fmt: on
    self.assertListEqual(x_token.input_ids, expected_outputs)
    self.assertListEqual(x_token.token_type_ids, expected_typeids)
    self.assertListEqual(x_token.attention_mask, expected_attmask)
    self.assertListEqual(x_token_a.input_ids, expected_outputs)
    self.assertListEqual(x_token_a.token_type_ids, expected_typeids)
    self.assertListEqual(x_token_a.attention_mask, expected_attmask)
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
| 707
|
"""Lazy-import shim for the CLIPSeg model family (configs, processor, models)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public symbols; consumed lazily by _LazyModule below so heavy
# dependencies (torch) are only imported when the names are actually used.
_import_structure = {
    'configuration_clipseg': [
        'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPSegConfig',
        'CLIPSegTextConfig',
        'CLIPSegVisionConfig',
    ],
    'processing_clipseg': ['CLIPSegProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: extend the structure under its own key instead of rebinding the
    # module-level name, which previously clobbered the config/processing
    # entries and left ``_import_structure`` undefined for _LazyModule.
    _import_structure['modeling_clipseg'] = [
        'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPSegModel',
        'CLIPSegPreTrainedModel',
        'CLIPSegTextModel',
        'CLIPSegVisionModel',
        'CLIPSegForImageSegmentation',
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Fix: install the lazy proxy into sys.modules (the result was previously
    # assigned to a throwaway name, so the lazy module was never activated).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 56
| 0
|
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( DiffusionPipeline ):
    """Unconditional image-generation pipeline.

    Starts from Gaussian noise and iteratively denoises it with the registered
    UNet under the registered scheduler (DDIM-style sampling loop).
    """

    def __init__(self, unet, scheduler) -> None:
        """Register the denoising ``unet`` and noise ``scheduler`` modules.

        Fix: the original signature repeated one placeholder parameter name
        (a SyntaxError) and inherited from an undefined base ``__A``; the
        imported ``DiffusionPipeline`` is the intended base.
        """
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size = 1,
        generator = None,
        num_inference_steps = 50,
        output_type = "pil",
        return_dict = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images.

        Args:
            batch_size: number of images to sample.
            generator: optional ``torch.Generator`` for reproducible noise.
            num_inference_steps: number of scheduler denoising steps.
            output_type: ``"pil"`` for PIL images, anything else for ndarray.
            return_dict: return an ``ImagePipelineOutput`` instead of a tuple.

        Returns:
            ``ImagePipelineOutput`` or, when ``return_dict`` is False, a
            one-element tuple containing the images.
        """
        # Start from pure Gaussian noise in the model's sample space.
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        # Map from [-1, 1] to [0, 1] and move channels last for conversion.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        # Fix: removed the stray injected "This is a local test" second return
        # element, which broke the documented return contract for callers.
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 708
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
    """Tests for Wav2Vec2ProcessorWithLM (tokenizer + feature extractor +
    pyctcdecode beam-search decoder).

    NOTE(review): identifiers here were machine-mangled — every method is
    named ``_UpperCAmelCase`` (so later definitions shadow earlier ones) and
    most locals are bound to the throwaway ``__lowercase`` while later
    statements reference ``_lowerCamelCase`` or the original upstream names,
    which are unresolved.  Code is kept byte-identical; the docstrings below
    describe the *intended* behaviour of the upstream tests each method was
    derived from — confirm against the original transformers test suite.
    """
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """setUp: write a toy CTC vocab and a feature-extractor config into a
        fresh temp dir and record the hub id of the ngram decoder."""
        __lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        # NOTE(review): ``_lowerCamelCase`` is not defined in this scope.
        __lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
        __lowercase = {
        '''unk_token''': '''<unk>''',
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        }
        __lowercase = {
        '''feature_size''': 1,
        '''padding_value''': 0.0,
        '''sampling_rate''': 16000,
        '''return_attention_mask''': False,
        '''do_normalize''': True,
        }
        __lowercase = tempfile.mkdtemp()
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        # load decoder from hub
        __lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
        """Build a CTC tokenizer from the temp dir, merging extra kwargs."""
        __lowercase = self.add_kwargs_tokens_map.copy()
        # NOTE(review): ``kwargs`` is unresolved — the captured keyword
        # arguments live in ``_lowerCamelCase``.
        kwargs.update(_lowerCamelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
        """Build the feature extractor from the config saved in the temp dir."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
        """Download the pyctcdecode beam-search decoder from the hub."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """tearDown: remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """save_pretrained/from_pretrained round-trips the tokenizer vocab, the
        feature-extractor config and the decoder alphabet/unigrams."""
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
        self.assertEqual(
        processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
        self.assertIsInstance(processor.decoder ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Any:
        """from_pretrained forwards extra LM kwargs (alpha/beta/score_boundary/
        unk_score_offset) into the loaded language model."""
        __lowercase = WavaVecaProcessorWithLM(
        tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(
        self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha ,5.0 )
        self.assertEqual(processor.language_model.beta ,3.0 )
        self.assertEqual(processor.language_model.score_boundary ,-7.0 )
        self.assertEqual(processor.language_model.unk_score_offset ,3 )
    def _UpperCAmelCase (self ) -> Dict:
        """Constructing the processor raises when the tokenizer vocab contains
        tokens missing from the decoder alphabet."""
        __lowercase = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
            WavaVecaProcessorWithLM(
            tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
    def _UpperCAmelCase (self ) -> Optional[int]:
        """Audio path: processor(...) matches calling the feature extractor."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = floats_list((3, 1000) )
        __lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
        __lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Text path: processor(text=...) matches calling the tokenizer."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = '''This is a test string'''
        __lowercase = processor(text=_lowerCamelCase )
        __lowercase = tokenizer(_lowerCamelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    # NOTE(review): duplicate placeholder parameter names (upstream: ``shape``
    # and ``seed``) — a SyntaxError as written.
    def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
        """_get_dummy_logits: deterministic random logits of the given shape."""
        np.random.seed(_lowerCamelCase )
        return np.random.rand(*_lowerCamelCase )
    def _UpperCAmelCase (self ) -> str:
        """decode() agrees with pyctcdecode's best beam (text and scores)."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
        __lowercase = processor.decode(_lowerCamelCase )
        __lowercase = decoder.decode_beams(_lowerCamelCase )[0]
        self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
        """batch_decode matches per-beam decoding, with and without a process
        pool (parameterised over pool start methods)."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            __lowercase = processor.batch_decode(_lowerCamelCase )
        else:
            with get_context(_lowerCamelCase ).Pool() as pool:
                __lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
        __lowercase = list(_lowerCamelCase )
        with get_context('''fork''' ).Pool() as p:
            __lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
        __lowercase , __lowercase , __lowercase = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
    def _UpperCAmelCase (self ) -> Any:
        """batch_decode honours beam_width / beam_prune_logp / token_min_logp
        and matches pyctcdecode called with the same parameters."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 15
        __lowercase = -2_0.0
        __lowercase = -4.0
        __lowercase = processor.batch_decode(
        _lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
        __lowercase = decoded_processor_out.text
        __lowercase = list(_lowerCamelCase )
        with get_context('''fork''' ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
            _lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        __lowercase = [d[0][2] for d in decoded_decoder_out]
        __lowercase = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
        self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
        self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """batch_decode forwards LM params (alpha/beta/unk_score_offset/
        lm_score_boundary) and matches pyctcdecode after reset_params."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 2.0
        __lowercase = 5.0
        __lowercase = -2_0.0
        __lowercase = True
        __lowercase = processor.batch_decode(
        _lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
        __lowercase = decoded_processor_out.text
        __lowercase = list(_lowerCamelCase )
        decoder.reset_params(
        alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
        with get_context('''fork''' ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
            _lowerCamelCase ,_lowerCamelCase ,)
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha ,2.0 )
        self.assertEqual(lm_model.beta ,5.0 )
        self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
        self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Optional[int]:
        """from_pretrained downloads only the decoder-relevant repo files."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        __lowercase = os.listdir(_lowerCamelCase )
        __lowercase = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Loading from a local snapshot keeps the same decoder files as hub."""
        __lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        __lowercase = os.listdir(_lowerCamelCase )
        __lowercase = os.listdir(_lowerCamelCase )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Dict:
        """AutoProcessor resolves to the same processor as the explicit class,
        on both the audio path and batch_decode."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = floats_list((3, 1000) )
        __lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
        __lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
        __lowercase = processor_auto.batch_decode(_lowerCamelCase )
        self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
    def _UpperCAmelCase (self ) -> Dict:
        """model_input_names mirrors the feature extractor's input names."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        self.assertListEqual(
        processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
    # NOTE(review): duplicate placeholder parameter names (upstream:
    # ``offsets`` and ``key``) — a SyntaxError as written, and ``key``/
    # ``offsets``/``retrieved_list`` below are unresolved.
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
        """get_from_offsets: pluck one key out of a list of offset dicts."""
        __lowercase = [d[key] for d in offsets]
        return retrieved_list
    def _UpperCAmelCase (self ) -> Optional[int]:
        """decode(output_word_offsets=True) returns text plus per-word offsets."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = self._get_dummy_logits()[0]
        __lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """batch_decode(output_word_offsets=True) returns offsets per sequence."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
        self.assertListEqual(
        [''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
    @slow
    @require_torch
    @require_torchaudio
    def _UpperCAmelCase (self ) -> List[Any]:
        """End-to-end: run a real Wav2Vec2+LM checkpoint on one Common Voice
        sample and check the decoded words and word-level timestamps."""
        import torch
        __lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
        __lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
        __lowercase = iter(_lowerCamelCase )
        __lowercase = next(_lowerCamelCase )
        __lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        __lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        __lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
        with torch.no_grad():
            __lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
        __lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
        # Convert model-frame offsets to seconds.
        __lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        __lowercase = [
        {
        '''start_time''': d['''start_offset'''] * time_offset,
        '''end_time''': d['''end_offset'''] * time_offset,
        '''word''': d['''word'''],
        }
        for d in output['''word_offsets''']
        ]
        __lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
        self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
        # output times
        __lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
        __lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
        # fmt: off
        __lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        __lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
        self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 0
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase(BaseImageProcessor):
    """Image processor that optionally resizes images down to the nearest multiple of
    ``size_divisor`` and optionally rescales pixel values to [0, 1].
    """

    # NOTE(review): "pixel_values" matches the HF image-processor convention; the
    # original garbled attribute was `a : int = ["pixel_values"]`.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        """Store the default preprocessing flags; `preprocess` reads them back.

        Args:
            do_resize: Whether to round image size down to a multiple of `size_divisor`.
            size_divisor: The multiple to round height/width down to.
            resample: PIL resampling filter used when resizing.
            do_rescale: Whether to rescale pixel values by 1/255.
        """
        # Fix: the original bound plain locals; `preprocess` reads these off `self`.
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image`, rounding height and width DOWN to the closest multiple of `size_divisor`."""
        height, width = get_image_size(image)
        # Floor-divide then multiply back to snap to the divisor grid.
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values of `image` by `scale` (thin wrapper over the module-level helper)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size_divisor=None, resample=None, do_rescale=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess one image or a batch of images into a `BatchFeature` of pixel_values.

        Any argument left as None falls back to the value stored on the instance.
        Raises ValueError on invalid images or when resizing without a size_divisor.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 56
| 0
|
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase(FeatureExtractionMixin):
    """Feature extractor that turns raw HTML strings into lists of text nodes and the
    xpath of each node, using BeautifulSoup (bs4) for parsing.
    """

    def __init__(self, **kwargs) -> None:
        """Require the bs4 backend, then defer to the mixin constructor."""
        requires_backends(self, ['bs4'])
        # Fix: original called super().__init__(**UpperCamelCase_) with an undefined name.
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag names, subscripts) describing the path from the document root to `element`.

        A subscript of 0 means the tag is its parent's only child with that name;
        otherwise it is the 1-based position among same-named siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        # Built leaf-to-root; present root-to-leaf.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, per-string tag paths, per-string subscripts)."""
        html_code = BeautifulSoup(html_string, 'html.parser')

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError('Number of doc strings and xtags does not correspond')
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError('Number of doc strings and xsubs does not correspond')

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join tag names and subscripts into an xpath string like "/html/body/div[2]"."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """Extract nodes and xpaths from one HTML string or a batch of them.

        Returns a BatchFeature with keys "nodes" and "xpaths" (parallel nested lists).
        Raises ValueError when the input is neither str nor list/tuple of str.
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                'HTML strings must of type `str`, `List[str]` (batch of examples), '
                f"but is of type {type(html_strings)}.")

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 710
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 are free path whereas 1's are obstacles
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A board position as (y, x).
TPosition = tuple[int, int]
class Node:
    """A search node: position, goal, path cost g, heuristic h and total cost f = g + h."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent  # predecessor on the path, None for the start node
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance-to-goal estimate: Manhattan when HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Sorting open lists orders nodes by total estimated cost.
        return self.f_cost < other.f_cost
class AStar:
    """Classic A* search on the module-level `grid` using the `delta` moves."""

    def __init__(self, start, goal) -> None:
        # Coordinates arrive as (y, x); Node takes (x, y, goal_x, goal_y, g, parent).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        """Run A*; return the path to the goal, or [start.pos] if no path exists."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        """Return in-bounds, obstacle-free neighbour nodes of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors

    def retrace_path(self, node):
        """Walk parent links back to the start and return the path root-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Run two A* searches (start→goal and goal→start) until their frontiers meet."""

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Advance both searches in lockstep; return the joined path once they meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Aim each search at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Stitch the forward path and the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    # Fix: the bidirectional search was timed but never actually run.
    bd_path = bidir_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 56
| 0
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post `message_body` as JSON to a Slack incoming-webhook `slack_url`.

    Raises:
        ValueError: when Slack answers with a non-200 status code.
    """
    headers = {"Content-Type": "application/json"}
    # Fix: the original posted to the function object itself instead of the URL.
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 711
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification model and copy the s3prl downstream weights into it."""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    # Fix: the original dropped the tensors into dead locals instead of the model.
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification model and copy the s3prl downstream weights into it."""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an x-vector model and copy the s3prl downstream weights into it.

    Weight target names follow the upstream s3prl conversion script — verify
    against the concrete UniSpeechSatForXVector module layout before shipping.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl UniSpeechSat downstream checkpoint into a HF model + feature extractor.

    Dispatches on the architecture declared in the config and saves both artifacts
    under `model_dump_path`. Raises NotImplementedError for unsupported heads.
    """
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    # NOTE(review): flag values restored from the upstream conversion script — confirm.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the four paths and run the s3prl -> HF conversion.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    # NOTE(review): the parser is bound to `_SCREAMING_SNAKE_CASE` but used as
    # `parser` above and re-assigned below — the name bindings look garbled; verify.
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 0
|
'''simple docstring'''
# Bacon-cipher table: each letter maps to a 5-character A/B code; space maps to itself.
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

# Inverse table used by decode().
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode `word` (letters and spaces only) with the Bacon cipher table.

    Raises:
        Exception: when `word` contains anything but letters and spaces.
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded
def decode(coded: str) -> str:
    """Decode a Bacon-cipher string of 'A'/'B' groups (space-separated words).

    Raises:
        Exception: when `coded` contains characters other than 'A', 'B' and spaces.
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the word five characters at a time, one letter per group.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 712
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers wrapped around lines that need a human to review the conversion.
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''

# tfds constructs that have no automatic datasets equivalent.
TO_HIGHLIGHT = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the `datasets-cli convert` subparser to build the command object."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    """CLI command that rewrites a TensorFlow Datasets script into a HuggingFace Datasets script."""

    @staticmethod
    def register_subcommand(parser):
        """Register the `convert` subcommand and its arguments on `parser`."""
        train_parser = parser.add_parser(
            '''convert''', help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''')
        train_parser.add_argument(
            '''--tfds_path''', type=str, required=True, help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''')
        train_parser.add_argument(
            '''--datasets_directory''', type=str, required=True, help='''Path to the HuggingFace Datasets folder.''')
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path, datasets_directory, *args):
        self._logger = get_logger('''datasets-cli/converting''')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every tfds script found at `--tfds_path` and write it to `--datasets_directory`."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''')
        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''')
                continue

            with open(input_file, encoding='''utf-8''') as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''', '''get_logger''')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '''\n''')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(''',''') )
                    out_line = '''from . import ''' + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('''.py''', '''''')
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, '''w''', encoding='''utf-8''') as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('''.py''', '''''')]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 56
| 0
|
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE = '''src/transformers'''
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'''\s+\"\S*\":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE = re.compile(R'''^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE = re.compile('''^\s+\"([^\"]+)\",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_SCREAMING_SNAKE_CASE = re.compile(R'''^\s*try:''')
# Catches a line with else:
_SCREAMING_SNAKE_CASE = re.compile(R'''^\s*else:''')
def find_backend(line):
    """Return the normalized backend of an `if not is_xxx_available()` line, else None.

    Multiple backends on one line are sorted and joined with "_and_".
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend has two groups, so findall yields tuples; the backend is group 0.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse an __init__.py and return the objects declared per backend.

    Returns a pair of dicts (``_import_structure`` objects, TYPE_CHECKING objects),
    each keyed by backend name ("none" for backend-free objects), or None when the
    file is not a lazy-init module.
    """
    with open(init_file, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('''_import_structure = {'''):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(''' ''' * 8 + '''\"'''):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING'''):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(''' ''' * 8 + '''\"'''):
                    objects.append(line[9:-3])
                elif line.startswith(''' ''' * 12 + '''\"'''):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('''else''')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', '''))
        elif line.startswith(''' ''' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def _lowerCAmelCase(import_dict_objects, type_hint_objects):
    """Compare the `_import_structure` half of an init with its TYPE_CHECKING half.

    Args:
        import_dict_objects: mapping backend-name -> list of object names found
            in the `_import_structure` part ("none" = no backend required).
        type_hint_objects: same mapping extracted from the TYPE_CHECKING part.

    Returns:
        A list of human-readable error strings (empty when both halves agree).

    Fixes: the original had two parameters with the same name (a SyntaxError)
    and referenced an undefined `__UpperCAmelCase` placeholder.
    """

    def find_duplicates(seq):
        # Names registered more than once on one side.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # The two halves must guard exactly the same set of backends.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def _lowerCAmelCase():
    """Walk the transformers source tree, parse every `__init__.py` and raise
    a ValueError listing every init whose two halves disagree.

    Fixes: the original referenced the undefined placeholder `__UpperCAmelCase`
    everywhere, and discarded the "Problem in ..." header instead of writing it
    back into `errors[0]`.

    NOTE(review): `PATH_TO_TRANSFORMERS`, `parse_init` and `analyze_results`
    are expected to be defined earlier in this file — confirm their names.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file name.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def _lowerCAmelCase():
    """Collect the dotted names of all public submodules of transformers.

    Returns a list of module paths relative to the package root, e.g.
    "models.bert" for package folders and "file_utils" for top-level modules.

    Fixes: the original referenced the undefined placeholder `__UpperCAmelCase`
    where `path`, `folder`, `fname`, `short_path` and `submodule` were meant.

    NOTE(review): `PATH_TO_TRANSFORMERS` is expected to be defined earlier in
    this file — confirm.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules; deeper files are reached via packages.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately exempt from the "registered in the main init" check
# below: conversion / framework-bridge helpers, not public API modules.
_SCREAMING_SNAKE_CASE = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def _lowerCAmelCase():
    """Check that every transformers submodule appears in `_import_structure`
    of the main init; raise a ValueError listing the missing ones.

    Fixes: the original bound the spec / loaded module / missing-module list /
    message to undefined placeholders (`__UpperCAmelCase`), so nothing it
    computed was usable.

    NOTE(review): `PATH_TO_TRANSFORMERS`, `get_transformers_submodules` and
    `IGNORE_SUBMODULES` are expected to be defined earlier in this file
    (the ignore list above is bound to `_SCREAMING_SNAKE_CASE`) — confirm.
    """
    # Load the real main init (not an installed copy) so we inspect the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # NOTE(review): `check_all_inits` / `check_submodules` are not defined
    # under these names in this file (the checker functions above are all
    # named `_lowerCAmelCase`), so this entry point raises NameError as
    # written — confirm the intended function names.
    check_all_inits()
    check_submodules()
| 713
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Script-level logger.
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
# Config classes with a language-modeling head, and their model-type strings
# (e.g. "bert").  NOTE(review): the second line reads `MODEL_CONFIG_CLASSES`,
# but the list is bound to `_SCREAMING_SNAKE_CASE` — the obfuscation appears
# to have renamed the constants; confirm the original names.
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase:
    """Arguments pertaining to which model/config/tokenizer we fine-tune, or
    train from scratch.

    Fixes: the original declared every field under the same name `a` (each
    clobbering the previous) with the undefined default `lowerCAmelCase__`;
    the real field names are restored (the training code below reads
    `model_name_or_path`, `config_name`, `tokenizer_name`, `cache_dir`).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    # NOTE(review): the joined list of valid model types is bound to
    # `_SCREAMING_SNAKE_CASE` above (presumably `MODEL_TYPES` originally).
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_SCREAMING_SNAKE_CASE)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class __lowercase:
    """Arguments pertaining to the data used for training and evaluation.

    Fixes: the original declared every field under the same name `a` (each
    clobbering the previous) with the undefined default `lowerCAmelCase__`;
    the real field names and `None`/`False` defaults are restored (the
    training code below reads `train_data_file`, `mlm`, `block_size`, ...).
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def _lowerCAmelCase(
    args: "DataTrainingArguments",
    tokenizer: "PreTrainedTokenizer",
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by `args`.

    Args:
        args: the data arguments (paths, line_by_line / mlm flags, block size).
        tokenizer: tokenizer used to encode the text files.
        evaluate: build the evaluation dataset instead of the training one.
        cache_dir: optional cache directory for `TextDataset`.

    Fixes: the original gave all four parameters the same name (a SyntaxError)
    while the body read `args`, `evaluate` and `cache_dir`.
    """

    def _dataset(file_path, ref_path=None):
        # One file -> one dataset; ref files enable Chinese whole-word masking.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Multiple training files (glob pattern) are concatenated.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def _lowerCAmelCase():
    """Entry point: parse CLI args, build model/tokenizer/datasets/collator,
    train, evaluate, and return the eval results dict.

    Fixes: the original bound every intermediate (parser, config, tokenizer,
    model, datasets, collator, trainer, results, ...) to throwaway names and
    then read undefined module-level `lowerCamelCase_` placeholders; it also
    read the nonexistent attribute `training_args.fpaa` (should be `fp16`).

    NOTE(review): `get_dataset`, `ModelArguments` and `DataTrainingArguments`
    are the conventional names of the helpers/classes defined above, which
    this file has obfuscated — confirm the intended names.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''')
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''')

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # was `fpaa`: TrainingArguments has no such attribute
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it,and load it from here, using --tokenizer_name''')

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('''.ckpt''' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('''Training new model from scratch''')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            '''--mlm flag (masked language modeling).''')

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''])
        result = {'''perplexity''': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, '''eval_results_lm.txt''')
        if trainer.is_world_master():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key in sorted(result.keys()):
                    logger.info('''  %s = %s''', key, str(result[key]))
                    writer.write('''%s = %s\n''' % (key, str(result[key])))
        results.update(result)

    return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    # For xla_spawn (TPUs): xla_spawn invokes this with the process index.
    # NOTE(review): `main` is not defined under that name in this file (the
    # training entry point above is also named `_lowerCAmelCase`) — confirm
    # the intended function name.
    main()
if __name__ == "__main__":
    main()
| 56
| 0
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN (Sequential - building the model layer by layer).
    # Fixes: the original bound every object to `_SCREAMING_SNAKE_CASE` while
    # reading `classifier`, `train_datagen`, ... (all undefined), and used the
    # nonexistent layer classes `ConvaD` / `MaxPoolingaD` (Keras: `Conv2D` /
    # `MaxPooling2D`).
    classifier = models.Sequential()

    # Step 1 - Convolution.
    # 64x64 is the length & breadth of the dataset images, 3 the RGB channels;
    # (3, 3) is the kernel (filter matrix) size.
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the VAN model: configuration is always available,
# modeling only when torch is installed.
# Fixes: the original bound the structure to `_SCREAMING_SNAKE_CASE` but
# passed the undefined name `_import_structure` to `_LazyModule`, and bound
# the lazy module to a dead name instead of replacing this module in
# `sys.modules`.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56
| 0
|
from ..utils import DummyObject, requires_backends
class __lowercase(metaclass=DummyObject):
    """Placeholder that raises an informative ImportError-style message when
    the torch/transformers/onnx backends are missing.

    Fixes: the metaclass was the undefined `__lowerCAmelCase` (the imported
    `DummyObject` is evidently intended), `*_lowerCamelCase, **_lowerCamelCase`
    duplicated an argument name (SyntaxError), and the two classmethods shared
    one name so the first was unreachable (restored `from_config` /
    `from_pretrained`, the conventional dummy-object hooks).
    """

    # Backends required by the real implementation (read by DummyObject).
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __lowercase(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/transformers/onnx are
    missing (see the first dummy class in this file for details).

    Fixes: undefined metaclass name, duplicate star-argument names
    (SyntaxError), and two classmethods sharing one name.
    """

    # Backends required by the real implementation (read by DummyObject).
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __lowercase(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/transformers/onnx are
    missing.

    Fixes: undefined metaclass name, duplicate star-argument names
    (SyntaxError), and two classmethods sharing one name.
    """

    # Backends required by the real implementation (read by DummyObject).
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __lowercase(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/transformers/onnx are
    missing.

    Fixes: undefined metaclass name, duplicate star-argument names
    (SyntaxError), and two classmethods sharing one name.
    """

    # Backends required by the real implementation (read by DummyObject).
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __lowercase(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/transformers/onnx are
    missing.

    Fixes: undefined metaclass name, duplicate star-argument names
    (SyntaxError), and two classmethods sharing one name.
    """

    # Backends required by the real implementation (read by DummyObject).
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class __lowercase(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/transformers/onnx are
    missing.

    Fixes: undefined metaclass name, duplicate star-argument names
    (SyntaxError), and two classmethods sharing one name.
    """

    # Backends required by the real implementation (read by DummyObject).
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 715
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase(GLPNImageProcessor):
    """Deprecated alias kept for backward compatibility: behaves exactly like
    `GLPNImageProcessor` but emits a deprecation warning on construction.

    Fixes: the base class was the undefined `lowerCAmelCase__` (the imported
    `GLPNImageProcessor` is evidently intended), `*_lowerCamelCase,
    **_lowerCamelCase` duplicated an argument name (SyntaxError), and the
    warning category argument was an undefined name (`FutureWarning` is the
    conventional category for deprecations).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger.
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
# On-disk file names used by this tokenizer (vocab / merges / config).
# NOTE(review): the tokenizer class below reads these under the names
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — the obfuscation appears to have
# renamed the constants; confirm the original names.
_SCREAMING_SNAKE_CASE : List[Any] = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
# Download URLs for the pretrained facebook/blenderbot-3B files.
_SCREAMING_SNAKE_CASE : str = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}
# Maximum model input length (in tokens) per pretrained checkpoint.
_SCREAMING_SNAKE_CASE : Dict = {'''facebook/blenderbot-3B''': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _lowerCAmelCase():
    """Return a dict mapping every byte value (0-255) to a printable unicode
    character.

    Printable/"nice" bytes map to themselves; the remaining bytes are mapped
    to code points starting at 256 so the BPE vocabulary never contains
    whitespace or control characters.

    Fixes: the original bound `bs`, `cs` and `n` to the throwaway name
    `__lowercase`, leaving every later reference undefined.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Non-printable byte: shift its representation past the byte range.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def _lowerCAmelCase(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is any sequence of symbols (e.g. a tuple of variable-length
    strings during BPE); the result is a set of (prev_symbol, next_symbol)
    tuples.

    Fixes: the original bound `pairs` and `prev_char` to the throwaway name
    `__lowercase`, leaving the loop body and return undefined.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowercase ( a__ ):
    """Blenderbot tokenizer: a GPT-2-style byte-level BPE tokenizer.

    NOTE(review): this file appears machine-obfuscated.  Method bodies refer
    to `_A` (the original argument names) and repeatedly re-bind the
    throwaway local `__lowercase`, so the code does not run as written; the
    docstrings below record the evident intent, to be confirmed against the
    original `tokenization_blenderbot.py`.
    """
    # Tokenizer class configuration; the right-hand names are expected to be
    # the module-level constants above (bound to `_SCREAMING_SNAKE_CASE`).
    a : Any = VOCAB_FILES_NAMES
    a : str = PRETRAINED_VOCAB_FILES_MAP
    a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a : Dict = ["input_ids", "attention_mask"]
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase="replace" ,_lowerCamelCase="<s>" ,_lowerCamelCase="</s>" ,_lowerCamelCase="</s>" ,_lowerCamelCase="<s>" ,_lowerCamelCase="<unk>" ,_lowerCamelCase="<pad>" ,_lowerCamelCase="<mask>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Any:
        """Load vocab/merges files and set up the byte-level BPE state:
        special tokens wrapped in AddedToken, `vocab.json` -> self.encoder
        (plus reverse decoder), byte<->unicode tables, and BPE merge ranks
        from `merges.txt`."""
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else bos_token
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else eos_token
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else sep_token
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else cls_token
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else unk_token
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __lowercase = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
        super().__init__(
            errors=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,add_prefix_space=_A ,**_A ,)
        with open(_A ,encoding='''utf-8''' ) as vocab_handle:
            __lowercase = json.load(_A )
        __lowercase = {v: k for k, v in self.encoder.items()}
        __lowercase = errors  # how to handle errors in decoding
        __lowercase = bytes_to_unicode()
        __lowercase = {v: k for k, v in self.byte_encoder.items()}
        with open(_A ,encoding='''utf-8''' ) as merges_handle:
            __lowercase = merges_handle.read().split('''\n''' )[1:-1]
        __lowercase = [tuple(merge.split() ) for merge in bpe_merges]
        __lowercase = dict(zip(_A ,range(len(_A ) ) ) )
        __lowercase = {}
        __lowercase = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __lowercase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _UpperCAmelCase (self ) -> Dict:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """Full vocabulary: base encoder merged with added tokens."""
        return dict(self.encoder ,**self.added_tokens_encoder )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
        """Apply (and cache) BPE merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        __lowercase = tuple(_A )
        __lowercase = get_pairs(_A )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked adjacent pair until none remains.
            __lowercase = min(_A ,key=lambda _lowerCamelCase : self.bpe_ranks.get(_A ,float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            __lowercase = bigram
            __lowercase = []
            __lowercase = 0
            while i < len(_A ):
                try:
                    __lowercase = word.index(_A ,_A )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __lowercase = j
                if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __lowercase = tuple(_A )
            __lowercase = new_word
            if len(_A ) == 1:
                break
            else:
                __lowercase = get_pairs(_A )
        __lowercase = ' '.join(_A )
        __lowercase = word
        return word
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
        """Tokenize text: regex pre-split, byte-to-unicode map, then BPE."""
        __lowercase = []
        for token in re.findall(self.pat ,_A ):
            __lowercase = ''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(''' ''' ) )
        return bpe_tokens
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[Any]:
        """Token string -> id, falling back to the unk token's id."""
        return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
        """Id -> token string."""
        return self.decoder.get(_A )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
        """Join sub-tokens and undo the byte-to-unicode mapping."""
        __lowercase = ''.join(_A )
        __lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' ,errors=self.errors )
        return text
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple:
        """Write vocab.json and merges.txt into `save_directory`; returns the
        two file paths."""
        if not os.path.isdir(_A ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        __lowercase = os.path.join(
            _A ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(
            _A ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(_A ,'''w''' ,encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '''\n''' )
        __lowercase = 0
        with open(_A ,'''w''' ,encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _lowerCamelCase : kv[1] ):
                if index != token_index:
                    # Gaps in merge indices indicate a corrupted tokenizer.
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''' )
                    __lowercase = token_index
                writer.write(''' '''.join(_A ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = False ) -> Dict:
        """0/1 mask marking special-token positions for one or two sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
        if token_ids_a is None:
            return [1] + ([0] * len(_A )) + [1]
        return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Union[str, Any]:
        """Token-type ids: all zeros (Blenderbot does not use segment ids)."""
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=False ,**_lowerCamelCase ) -> Any:
        """Optionally prefix a space before tokenization (add_prefix_space)."""
        __lowercase = kwargs.pop('''add_prefix_space''' ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
            __lowercase = ' ' + text
        return (text, kwargs)
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> int:
        """Blenderbot format: append the EOS token to a single sequence."""
        return token_ids_a + [self.eos_token_id]
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[int]:
        """Encode a Conversation object, trimming from the left when the
        result exceeds the model's maximum input length."""
        __lowercase = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(_A )
        __lowercase = ' '.join(_A )
        __lowercase = self.encode(_A )
        if len(_A ) > self.model_max_length:
            __lowercase = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class GraphUndirectedWeighted:
    """Undirected weighted graph; computes an MST with Boruvka's algorithm.

    De-mangled restoration: every method was named `_UpperCAmelCase` (so
    earlier defs were clobbered) and every assignment target was
    `__lowercase`, leaving `self.m_num_of_nodes`, `self.m_edges` and
    `self.m_component` never set.  Names are restored from the internal
    call sites (`self.find_component`, `self.set_component`, `self.union`).
    """

    def __init__(self, num_of_nodes) -> None:
        # Number of nodes; edges stored as [u, v, weight]; component map
        # maps each node to its current component representative.
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node, v_node, weight) -> None:
        """Register an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        """Follow parent links until the component representative is reached."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """Recompute every node's representative unless u_node is a root."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the components of u_node and v_node (union by size)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print the minimum spanning tree via Boruvka's algorithm."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


# Backward-compatible alias for the previous (mangled) class name.
__lowercase = GraphUndirectedWeighted
def _lowerCAmelCase():
    """No-op placeholder; intentionally does nothing."""
    return None
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 56
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-loaded public API of the BERT model family.  Each optional backend
# (tokenizers, torch, tf, tensorflow_text, flax) contributes its symbols to
# `_import_structure` only when the dependency is installed.  The original
# assigned every list to a throwaway name, leaving `_import_structure`
# (consumed by `_LazyModule` below) undefined.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    # NOTE: the original called the nonexistent `layers.ConvaD`/`MaxPoolingaD`;
    # restored to the real Keras `Conv2D`/`MaxPooling2D` layers.
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))

    # Compiling the CNN
    classifier.compile(
        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    # fit_generator is deprecated in recent Keras but kept for behavior parity.
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('cnn.h5')

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
| 0
|
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer with word-level pre-tokenizers.

    De-mangled restoration: the base class `_UpperCAmelCase` was undefined
    (TokenizerTesterMixin is imported above), every test method collided on
    one name, and parameters/bodies mixed `_lowerCamelCase`/`lowerCamelCase_`
    (NameErrors).  Names restored from the surviving call sites.
    """

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "\u3000", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された\u3000。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer with the character subword tokenizer.

    De-mangled restoration: base class, method names, and parameters were
    all replaced by colliding placeholder identifiers.
    """

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """AutoTokenizer should resolve BertJapanese checkpoints to the right class."""

    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        # Both assertIsInstance arguments were mangled to the same name;
        # restored from the imports and the checkpoint being loaded.
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """Loading a checkpoint with a mismatched tokenizer class must warn."""

    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 718
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
# Public re-exports of this utils package.  The list was assigned to a dead
# mangled name; restored to `__all__` so `from ... import *` and linters see
# the intended API.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 0
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is a prime, False otherwise.

    Restored name/parameter: the def's parameter was `lowerCamelCase_`
    while the body reads `number`, and the sibling function below calls
    `is_prime`.  Requires a non-negative int (asserted).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # Only odd divisors up to sqrt(number) need checking.
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    """Return the next prime after factor*value.

    Searches upward (or downward when kwargs['desc'] is True).  If the
    starting value is itself prime, recurses from value+1 so the *next*
    prime is returned.  Restored names: the original shadowed `is_prime`
    under the same mangled identifier and its locals were unassigned.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 719
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level constants for the GPT-NeoX fast tokenizer.  All four were
# mangled to one colliding name while the class below reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class __lowercase(PreTrainedTokenizerFast):
    """Fast GPT-NeoX-20B tokenizer (byte-level BPE) backed by `tokenizers`.

    De-mangled restoration: the base was the undefined `lowerCAmelCase__`
    (PreTrainedTokenizerFast is imported above), `__init__` had duplicate
    parameter names (SyntaxError), and attribute writes were lost.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            # Keep only the most recent tokens that fit the model.
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 56
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazily-loaded public API of SqueezeBERT.  The original assigned each
# backend's symbol list to a throwaway name, leaving `_import_structure`
# (consumed by `_LazyModule` below) undefined.
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (expects array-likes supporting ==/.mean())."""
    # Original body referenced undefined names `preds`/`labels`; parameters restored.
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both accuracy and F1 score for binary classification subsets (MRPC/QQP).

    The original definition duplicated one parameter name (a SyntaxError);
    parameters restored to the upstream `preds`/`labels` signature.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations between predictions and labels (STS-B)."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    """GLUE metric: dispatches to accuracy / F1 / correlation depending on the configured subset.

    The `datasets.Metric` base class requires the hook methods to be named
    `_info` and `_compute`; the original mangled names (and `_compute`'s
    duplicated parameter name, a SyntaxError) are fixed here.
    """

    def _info(self):
        """Describe inputs/outputs for the selected GLUE subset."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # STS-B is a regression task; every other subset is classification.
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Compute the metric(s) matching the configured GLUE subset."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 56
| 0
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __lowercase ( unittest.TestCase ):
    """Smoke tests: ensure `accelerate.debug_launcher` can run the bundled test scripts on CPU.

    NOTE(review): both methods are named `_UpperCAmelCase`, so the second definition
    shadows the first and unittest discovers neither (no `test_` prefix) — the
    original `test_*` names must be restored for these to actually run.
    """
    def _UpperCAmelCase (self ) -> int:
        """Launch the standard accelerate test script through the debug launcher."""
        debug_launcher(test_script.main )
    def _UpperCAmelCase (self ) -> Tuple:
        """Launch the ops test script through the debug launcher."""
        debug_launcher(test_ops.main )
| 721
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
# Enable info-level logging for the conversion script.
logging.set_verbosity_info()
# Module-level logger (currently unused in the visible portion of this script).
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq OPT checkpoint and remap its state dict to HF layout.

    Drops unneeded keys, renames projection/layer-norm keys, and splits fused
    ``qkv_proj`` weights into separate ``q_proj``/``k_proj``/``v_proj`` entries.
    Returns the remapped state dict.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd:
        # Original re-read the whole file here; the nested dict is already in memory.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    for key in list(sd.keys()):
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint to a HF `OPTModel` and save it.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint file.
        pytorch_dump_folder_path: output directory for `save_pretrained`.
        config: optional HF config name/path; defaults to a fresh `OPTConfig()`.
    """
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the original discarded the parser/args bindings, so the
    # references below raised NameError; restored to `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--fairseq_path''',
        type=str,
        help=(
            '''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
            ''' https://huggingface.co/models?other=opt_metasq'''
        ),
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 0
|
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """LoggerAdapter that understands multi-process (accelerate) logging.

    Supports two extra keyword arguments on every logging call:
      - ``main_process_only`` (default True): log only on the main process.
      - ``in_order`` (default False): log on every process, one process at a time.

    NOTE(review): class renamed from a mangled placeholder so that ``get_logger``
    below can construct it; both methods previously shared one mangled name, so
    the static helper was shadowed and unreachable.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Return True when this process should emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger, honoring `main_process_only`/`in_order`."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        # Defaults follow upstream accelerate: main-process-only logging, unordered.
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    # Barrier so processes emit strictly in rank order.
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter`-wrapped logger.

    If `log_level` is not given it falls back to the ``ACCELERATE_LOG_LEVEL``
    environment variable (which may also be unset, leaving levels untouched).
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 700
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
    """Fast (CPU) tests for `VQDiffusionPipeline` built from tiny dummy components.

    NOTE(review): identifier mangling broke this class — every method is named
    `_UpperCAmelCase` (later definitions shadow earlier ones, and unittest
    discovers no `test_*` methods), and local results are assigned to a
    throwaway name while later lines reference the original names
    (`model`, `tokenizer`, `pipe`, `image`, ...). The original property/method
    names must be restored for these tests to run.
    """
    def _UpperCAmelCase (self ) -> List[Any]:
        """Release memory between tests (originally `tearDown`)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Codebook size (originally `num_embed`)."""
        return 12
    @property
    def _UpperCAmelCase (self ) -> List[Any]:
        """Number of AdaNorm embeddings (originally `num_embeds_ada_norm`)."""
        return 12
    @property
    def _UpperCAmelCase (self ) -> List[Any]:
        """Hidden size of the dummy CLIP text encoder (originally `text_embedder_hidden_size`)."""
        return 32
    @property
    def _UpperCAmelCase (self ) -> List[str]:
        """Tiny VQ-VAE used by the pipeline (originally `dummy_vqvae`)."""
        torch.manual_seed(0 )
        __lowercase = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
        # NOTE(review): `model` below is undefined — the VQModel above was bound
        # to a throwaway name.
        return model
    @property
    def _UpperCAmelCase (self ) -> int:
        """Tiny CLIP tokenizer (originally `dummy_tokenizer`)."""
        __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # NOTE(review): `tokenizer` is undefined — same mangling issue as above.
        return tokenizer
    @property
    def _UpperCAmelCase (self ) -> Tuple:
        """Tiny CLIP text encoder (originally `dummy_text_encoder`)."""
        torch.manual_seed(0 )
        __lowercase = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        # NOTE(review): `_lowerCamelCase` is undefined — should pass the config
        # constructed above.
        return CLIPTextModel(_lowerCamelCase )
    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Tiny Transformer2D denoiser (originally `dummy_transformer`)."""
        torch.manual_seed(0 )
        __lowercase = 12
        __lowercase = 12
        __lowercase = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        __lowercase = TransformeraDModel(**_lowerCamelCase )
        return model
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """End-to-end dummy-pipeline test with learnable=False CF embeddings
        (originally `test_vq_diffusion`); checks output shape and a pixel slice."""
        __lowercase = '''cpu'''
        __lowercase = self.dummy_vqvae
        __lowercase = self.dummy_text_encoder
        __lowercase = self.dummy_tokenizer
        __lowercase = self.dummy_transformer
        __lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
        __lowercase = VQDiffusionPipeline(
            vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
        __lowercase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def _UpperCAmelCase (self ) -> Any:
        """Same pipeline test with learnable classifier-free sampling embeddings
        (originally `test_vq_diffusion_classifier_free_sampling`)."""
        __lowercase = '''cpu'''
        __lowercase = self.dummy_vqvae
        __lowercase = self.dummy_text_encoder
        __lowercase = self.dummy_tokenizer
        __lowercase = self.dummy_transformer
        __lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(
            learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
        __lowercase = VQDiffusionPipeline(
            vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
        __lowercase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
        __lowercase = image[0, -3:, -3:, -1]
        __lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
        # NOTE(review): 2.0 tolerance here vs 1E-2 below looks inconsistent — confirm
        # against the upstream diffusers test.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    """Slow GPU integration test: runs the real `microsoft/vq-diffusion-ithq`
    pipeline and compares against a reference image.

    NOTE(review): method names and local bindings are mangled (`pipeline`,
    `expected_image`, `image` are referenced but the values were assigned to a
    throwaway name, and `_lowerCamelCase` is undefined); the original names must
    be restored for this test to run.
    """
    def _UpperCAmelCase (self ) -> Dict:
        """Release GPU memory between tests (originally `tearDown`)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase (self ) -> str:
        """Compare a generated teddy-bear image against the stored reference
        (originally `test_vq_diffusion_classifier_free_sampling`)."""
        __lowercase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        __lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        __lowercase = pipeline.to(_lowerCamelCase )
        pipeline.set_progress_bar_config(disable=_lowerCamelCase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        __lowercase = pipeline(
            '''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
        __lowercase = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 56
| 0
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Module-level logger and default device; both names are referenced below
# (the original assigned both values to one shadowed placeholder name).
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fpaa=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run `model.generate` over `examples` in batches and write one hypothesis per line.

    Args:
        examples: list of source strings.
        out_file: path to write decoded hypotheses to.
        model_name: HF model id/path for model and tokenizer.
        batch_size: number of examples per generate call.
        device: torch device string.
        fpaa: run the model in float16 when True.
        task: used for task-specific generation params.
        prefix: optional text prepended to every example (falls back to config).
        **generate_kwargs: forwarded to `model.generate`.

    Returns:
        dict with "n_obs", "runtime" (seconds) and "seconds_per_sample".
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """CLI driver: parse args, generate summaries/translations, optionally score them.

    Returns the scores dict (empty when no reference path is supplied).
    Fixes from review: the argparse result and every intermediate were bound to
    throwaway names, and `--fp16` produces `args.fp16` (the original read a
    nonexistent attribute).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
    # Entry point: parse CLI args, run generation, and optionally score the output.
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 701
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( BaseTokenizer ):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    Fixes from review: the base class was an undefined placeholder (the imported
    `BaseTokenizer` is the intended base, as its `(tokenizer, parameters)` ctor
    is called via `super()`); `special_tokens_list` was never populated; the
    three public methods shared one mangled name; and `add_unk_id` discarded the
    mutated tokenizer JSON.
    """

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True,
                 unk_token: str = "<unk>", eos_token: str = "</s>", pad_token: str = "<pad>") -> None:
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Materialize the special tokens in id order for the trainer.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Append EOS to every encoded sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True) -> None:
        """Train the Unigram model from one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]],
                            vocab_size: int = 8000, show_progress: bool = True) -> None:
        """Train the Unigram model from an iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self) -> None:
        """Patch the serialized model so unknown tokens map to the <unk> id."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 56
| 0
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for Funnel (slow and fast tokenizers).

    Fixes from review: the mixin base was an undefined placeholder (the imported
    `TokenizerTesterMixin` is intended); the four class attributes were all named
    `a` (so `self.tokenizer_class` was unset); `setUp` bound nothing to
    `vocab_tokens`/`self.vocab_file`; and the test methods shadowed each other
    under one mangled name.
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab file used by the tests below."""
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            # Funnel uses token type 2 for the leading <cls> token.
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
def solution(n: int = 4_0_0_0_0_0_0) -> int:
    """Sum of the even Fibonacci numbers not exceeding ``n`` (Project Euler #2).

    Fixes from review: the Fibonacci pair was bound to a single throwaway name,
    and the appended/summed name was undefined.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
| 703
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used by every chat_postMessage call below, authenticated with
# the CI bot token taken from the environment (KeyError if the variable is unset).
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
    """Builds and posts the Slack report for a doc-test CI run.

    NOTE(review): this file was mechanically rewritten — every local variable
    became ``__lowercase`` and every method ``_UpperCAmelCase``.  As a result
    ``__init__`` and ``get_reply_blocks`` have duplicate parameter names (a
    SyntaxError), locals are assigned but never stored on ``self`` even though
    later code reads ``self.title``, ``self.n_failures`` etc., and the
    identically named ``@property`` definitions overwrite one another in the
    class namespace.  The original distinct names must be restored before this
    class can run; comments below describe the evident intent.
    """

    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
        # NOTE(review): duplicate parameter names; the right-hand names
        # ``title`` and ``doc_test_results`` below are unbound here.
        __lowercase = title
        __lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
        __lowercase = doc_test_results['''success''']
        __lowercase = doc_test_results['''failures''']
        __lowercase = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        __lowercase = doc_test_results

    @property
    def _UpperCAmelCase (self ) -> str:
        """Format the accumulated durations as an "XhYmZs" string."""
        __lowercase = [self._time_spent]
        __lowercase = 0
        for time in time_spent:
            __lowercase = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_lowerCamelCase ) == 1:
                __lowercase = [0, 0, time_parts[0]]
            __lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        __lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack header block carrying the report title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block for an all-green run, linking to the Action."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block summarizing the failure/total counts."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack mrkdwn section listing failed examples grouped by category."""
        __lowercase = 40
        __lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
        __lowercase = ''''''
        for category, failures in category_failures.items():
            if len(_lowerCamelCase ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_lowerCamelCase )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def _UpperCAmelCase (self ) -> str:
        """JSON-encoded list of Slack blocks for the top-level message."""
        __lowercase = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_lowerCamelCase )

    @staticmethod
    def _UpperCAmelCase () -> List[str]:
        """Post a generic "tests could not run" message to the daily channel."""
        __lowercase = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)

    def _UpperCAmelCase (self ) -> Tuple:
        """Post the main report message to the daily CI Slack channel."""
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        __lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
        __lowercase = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)

    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        # NOTE(review): duplicate parameter names; the body reads ``failures``,
        # ``job_name``, ``job_link``, ``text`` and ``title`` — none bound here.
        # Builds the Slack blocks for one job's threaded reply, truncating each
        # failure message to 200 characters.
        __lowercase = ''''''
        for key, value in failures.items():
            __lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        __lowercase = job_name
        __lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            __lowercase = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def _UpperCAmelCase (self ) -> Any:
        """Post one threaded reply per job with that job's failure details."""
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        __lowercase = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        # NOTE(review): the lambda's parameter is ``_lowerCamelCase`` but the
        # body reads ``t`` — unbound as written.
        __lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                __lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
                __lowercase = job_result['''failures''']
                __lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
                # Rate-limit friendliness between per-job replies.
                time.sleep(1 )
def _lowerCAmelCase ( ):
    """Return ``{job_name: html_url}`` for every job of the current GitHub
    Actions run, paging through the API 100 jobs at a time.

    Best effort: on any error the exception is printed and ``{}`` returned.
    Fixes the garbled original, which requested an undefined name and bound
    every intermediate result to the same local.
    """
    run_id = os.environ['GITHUB_RUN_ID']
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        # First page already fetched above; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result['total_count'] - 1_0_0) / 1_0_0)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)
    return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): the original script bound each result to its own name
    # (github_actions_job_links, available_artifacts, docs, doc_test_results,
    # artifact_path, artifact, ...); here every assignment rebinds the single
    # name ``_SCREAMING_SNAKE_CASE`` while later lines still read the original
    # names, which are unbound.  The helpers it calls (get_job_links,
    # retrieve_available_artifacts, retrieve_artifact, handle_test_results,
    # extract_first_line_failure, Message) were likewise renamed away.
    _SCREAMING_SNAKE_CASE = get_job_links()
    _SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
    # Maps an artifact file pattern to its human-readable doc-test category.
    _SCREAMING_SNAKE_CASE = collections.OrderedDict(
        [
            ('''*.py''', '''API Examples'''),
            ('''*.md''', '''MD Examples'''),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    _SCREAMING_SNAKE_CASE = {
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    _SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
    _SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    _SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        # Parse overall failed/passed counts and wall time from the stats file.
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
        _SCREAMING_SNAKE_CASE = failed
        _SCREAMING_SNAKE_CASE = success
        _SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
        _SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                _SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
                _SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
                # "path::test" lines name a specific test; bare lines are both.
                if "::" in line:
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
                else:
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
                # Attribute the failure to the first matching category pattern.
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        _SCREAMING_SNAKE_CASE = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        _SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
                        _SCREAMING_SNAKE_CASE = failure
                        break
    _SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
    message.post()
    message.post_reply()
| 56
| 0
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp

# Native torch AMP (autocast) is only available from torch 1.6 onwards.
# NOTE(review): the boolean flag and the module logger were both renamed to
# ``_SCREAMING_SNAKE_CASE`` — the second assignment clobbers the first, so
# the "native amp available" flag is effectively lost.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _SCREAMING_SNAKE_CASE = True
    from torch.cuda.amp import autocast

_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class __lowercase :
    """Command-line arguments for model selection and the gumbel-softmax
    temperature schedule.

    NOTE(review): every field was renamed to ``a`` — in a dataclass each
    re-annotation of ``a`` replaces the previous one, so only the final field
    survives.  The ``help`` strings still describe the original fields, and
    ``main``/``configure_logger`` read the original attribute names
    (model_name_or_path, cache_dir, verbose_logging, max_gumbel_temperature,
    min_gumbel_temperature, gumbel_temperature_decay).  ``default=lowercase__``
    references an undefined name (presumably originally ``None`` — confirm).
    """

    a : Optional[Any] = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    a : int = field(
        default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    a : Any = field(
        default=lowercase__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    a : Optional[Any] = field(
        default=lowercase__ , metadata={"help": "Whether to log verbose messages or not."} , )
    a : str = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    a : int = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    a : List[str] = field(
        default=0.99_99_95 , metadata={"help": "Decay of gumbel temperature during training."} )
def _lowerCAmelCase ( model_args : ModelArguments , training_args : TrainingArguments ):
    """Configure root logging and set this module's logger level.

    DEBUG when ``verbose_logging`` is requested, INFO on the main process,
    WARNING otherwise. Fixes the garbled original, whose two parameters shared
    one name (a SyntaxError) and whose level bindings were clobbered.
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class __lowercase :
    """Command-line arguments for dataset selection and audio preprocessing.

    NOTE(review): every field was renamed to ``a`` — in a dataclass each
    re-annotation of ``a`` replaces the previous one, so only the final field
    survives.  ``main`` reads the original attribute names (dataset_name,
    dataset_config_name, train_split_name, validation_split_percentage,
    speech_file_column, overwrite_cache, preprocessing_num_workers,
    max_duration_in_seconds).  ``default=lowercase__`` references an undefined
    name (presumably originally ``None`` — confirm).
    """

    a : List[str] = field(
        default=lowercase__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    a : List[Any] = field(
        default=lowercase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    a : Any = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    a : str = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    a : str = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    a : Tuple = field(
        default=lowercase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    a : Tuple = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    a : Optional[Any] = field(
        default=lowercase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
    a : Dict = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class __lowercase :
    """Collator that pads raw audio batches and samples masked time indices
    for Wav2Vec2 pretraining.

    NOTE(review): the class attributes were all renamed to ``a`` (only the
    last survives) and their annotations replaced by ``42`` placeholders;
    ``__call__`` reads ``self.feature_extractor``, ``self.model``,
    ``self.max_length``, ``self.padding`` and ``self.pad_to_multiple_of``,
    none of which exist under these garbled names, and its locals are all
    clobbered into ``__lowercase``.
    """

    a : Dict = 42
    a : List[Any] = 42
    a : int = "longest"
    a : Tuple = None
    a : Union[str, Any] = None

    def __call__(self ,_lowerCamelCase ) -> Dict[str, torch.Tensor]:
        """Pad the batch to tensors and compute time-mask indices via
        ``_compute_mask_indices``."""
        __lowercase = self.feature_extractor.pad(
            _lowerCamelCase ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='''pt''' ,)
        __lowercase = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        __lowercase = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            __lowercase = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            __lowercase = torch.zeros(
                (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch['''input_values'''].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            __lowercase = 1
            __lowercase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        __lowercase = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=_lowerCamelCase ,min_masks=2 ,)
        return batch
class __lowercase ( lowercase__ ):
    """Trainer subclass that decays the model's gumbel-softmax temperature
    after every training step.

    NOTE(review): the base class name ``lowercase__`` is undefined in this
    file; the ``__init__`` signature repeats the parameter name
    ``_lowerCamelCase`` (a SyntaxError); the constructor binds locals that
    never reach ``self`` even though the training step reads
    ``self.num_update_step`` / ``self.max_gumbel_temp`` etc.; and both methods
    share the name ``_UpperCAmelCase`` so the second overwrites the first.
    """

    def __init__(self ,*_lowerCamelCase ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=1.0 ,**_lowerCamelCase ) -> Optional[int]:
        # Keyword defaults suggest (max_gumbel_temp=1, min_gumbel_temp=0,
        # gumbel_temp_decay=1.0); the names on the right below are unbound.
        super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
        __lowercase = 0
        __lowercase = max_gumbel_temp
        __lowercase = min_gumbel_temp
        __lowercase = gumbel_temp_decay

    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> torch.Tensor:
        """One training step: forward pass, loss scaling/backprop (amp, apex,
        deepspeed or plain), then gumbel temperature decay."""
        model.train()
        __lowercase = self._prepare_inputs(_lowerCamelCase )
        if self.use_amp:
            with autocast():
                __lowercase = self.compute_loss(_lowerCamelCase ,_lowerCamelCase )
        else:
            __lowercase = self.compute_loss(_lowerCamelCase ,_lowerCamelCase )
        if self.args.n_gpu > 1 or self.deepspeed:
            # Multi-GPU / deepspeed wraps the model in a module attribute.
            if model.module.config.ctc_loss_reduction == "mean":
                __lowercase = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                __lowercase = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
        if self.args.gradient_accumulation_steps > 1:
            __lowercase = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(_lowerCamelCase ).backward()
        elif self.use_apex:
            with amp.scale_loss(_lowerCamelCase ,self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(_lowerCamelCase )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
        return loss.detach()
def _lowerCAmelCase ( ):
    """Entry point: parse arguments, load and preprocess the audio dataset,
    then run Wav2Vec2 pretraining.

    NOTE(review): every local was renamed to ``__lowercase``, so later lines
    read names that are never bound (parser, model_args, data_args,
    training_args, datasets, feature_extractor, vectorized_datasets, config,
    model, data_collator, trainer, batch, data); the ``_lowerCamelCase``
    arguments and the ``configure_logger`` /
    ``DataCollatorForWavaVecaPretraining`` / ``WavaVecaPreTrainer`` callables
    are likewise undefined under those names in this file.
    """
    __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    __lowercase = parser.parse_args_into_dataclasses()
    configure_logger(_lowerCamelCase , _lowerCamelCase )
    # Downloading and loading a dataset from the hub.
    __lowercase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        __lowercase = DatasetDict()
        __lowercase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        __lowercase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        __lowercase = DatasetDict()
        __lowercase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        __lowercase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    __lowercase = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )

    def prepare_dataset(lowerCamelCase_ : Any ):
        # check that all files have the correct sampling rate
        __lowercase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch

    # load audio files into numpy arrays
    __lowercase = datasets.map(
        _lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    __lowercase = vectorized_datasets.filter(
        lambda lowerCamelCase_ : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )

    def normalize(lowerCamelCase_ : Union[str, Any] ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )

    # normalize and transform to `BatchFeatures`
    __lowercase = vectorized_datasets.map(
        _lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    __lowercase = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'''' )
    __lowercase = WavaVecaForPreTraining(_lowerCamelCase )
    __lowercase = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
    __lowercase = WavaVecaPreTrainer(
        model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
    # NOTE(review): originally called ``main()``, which no longer exists after
    # the mechanical rename; the last function bound to ``_lowerCAmelCase``
    # above is the entry point.
    _lowerCAmelCase()
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
    """Entry point for the ``diffusers-cli`` tool: build the argument parser,
    register sub-commands, dispatch to the selected command and run it.

    Fixes the garbled original, whose locals were all clobbered so ``parser``,
    ``args`` and ``service`` were unbound where read.
    """
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # NOTE(review): originally called ``main()``, which no longer exists after
    # the mechanical rename; the CLI entry point above is ``_lowerCAmelCase``.
    _lowerCAmelCase()
| 56
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers.
# NOTE(review): rebuilt — previously every assignment clobbered a single
# module-level name and the final ``_LazyModule(...)`` call referenced an
# undefined ``_import_structure``, raising NameError at import time.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_nllb'''] = ['''NllbTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_nllb_fast'''] = ['''NllbTokenizerFast''']

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=DummyObject ):
    """Placeholder object that raises a helpful error whenever it is
    instantiated or its constructors are called without the ``note_seq``
    backend installed.

    Fixes the garbled original: ``metaclass=__a`` referenced an undefined
    name (``DummyObject`` is what this file imports), and each signature
    repeated the same name for ``*args`` and ``**kwargs`` (a SyntaxError).
    """

    # NOTE(review): the backend list attribute was renamed to ``a``; restored
    # to ``_backends``, the name the DummyObject machinery reads — confirm.
    _backends = ['note_seq']

    def __init__(self, *args, **kwargs) -> Union[str, Any]:
        requires_backends(self, ['note_seq'])

    @classmethod
    def _UpperCAmelCase (cls, *args, **kwargs) -> int:
        requires_backends(cls, ['note_seq'])

    @classmethod
    def _UpperCAmelCase (cls, *args, **kwargs) -> Tuple:
        requires_backends(cls, ['note_seq'])
| 706
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
    """Time one max-subarray run over ``lowerCamelCase_`` random integers.

    NOTE(review): locals were renamed to ``__lowercase``, so ``input_size``,
    ``end`` and ``start`` below are unbound, and ``max_subarray`` no longer
    exists under that name in this module (it was renamed to
    ``_lowerCAmelCase`` and then shadowed by later definitions).
    """
    __lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
    __lowercase = time.time()
    max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
    __lowercase = time.time()
    return end - start
def _lowerCAmelCase ( ):
    """Benchmark the max-subarray routine over growing input sizes, print the
    timings and plot them with matplotlib.

    NOTE(review): calls ``time_max_subarray`` and reads ``input_sizes`` /
    ``runtimes`` — names lost to the mechanical rename (every binding below
    is clobbered into ``__lowercase``), so this function cannot run as-is.
    """
    __lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
    __lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
        print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
    plt.plot(lowerCamelCase_ , lowerCamelCase_ )
    plt.xlabel('''Number of Inputs''' )
    plt.ylabel('''Time taken in seconds''' )
    plt.show()
if __name__ == "__main__":
    from doctest import testmod

    # Run this module's doctests when executed directly.
    testmod()
| 56
| 0
|
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
__lowercase = sum(__snake_case ) / len(__snake_case ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__snake_case )
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed directly.
    doctest.testmod()
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the CLIPSeg model family.
# NOTE(review): rebuilt — previously every assignment clobbered a single
# module-level name and the final ``_LazyModule(...)`` call referenced an
# undefined ``_import_structure``, raising NameError at import time.
_import_structure = {
    '''configuration_clipseg''': [
        '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPSegConfig''',
        '''CLIPSegTextConfig''',
        '''CLIPSegVisionConfig''',
    ],
    '''processing_clipseg''': ['''CLIPSegProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_clipseg'''] = [
        '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPSegModel''',
        '''CLIPSegPreTrainedModel''',
        '''CLIPSegTextModel''',
        '''CLIPSegVisionModel''',
        '''CLIPSegForImageSegmentation''',
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 0
|
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
return "\n".join(
f"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
    # NOTE(review): originally called ``multiplication_table``, a name that no
    # longer exists; the function above was renamed to ``_lowerCAmelCase``.
    print(_lowerCAmelCase(number=5, number_of_terms=1_0))
| 708
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
    """Tests for Wav2Vec2ProcessorWithLM: save/load round-trips, encode/decode
    parity with the underlying tokenizer and feature extractor, pyctcdecode
    beam-search decoding (single, batched, with a multiprocessing pool, and
    with decoder/LM parameter overrides), hub download contents, and word
    time offsets.

    NOTE(review): local names throughout this file appear machine-mangled --
    many results are bound to ``__lowercase`` but later read back under their
    original names (e.g. ``tokenizer``, ``decoder``, ``processor``); verify
    against the upstream test module before running.
    """
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """setUp-style fixture: write a tiny CTC vocab and a feature-extractor
        config into a fresh temp dir, and record the hub decoder repo name."""
        __lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        __lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
        __lowercase = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        __lowercase = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        __lowercase = tempfile.mkdtemp()
        __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
        # load decoder from hub
        __lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
        """Load a CTC tokenizer from the temp dir, merging the fixture's special-token kwargs."""
        __lowercase = self.add_kwargs_tokens_map.copy()
        kwargs.update(_lowerCamelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
        """Load the feature extractor saved in the temp dir."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
    def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
        """Load the pyctcdecode beam-search decoder from the hub test repo."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """tearDown-style fixture: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """save_pretrained/from_pretrained round-trip preserves tokenizer vocab,
        feature-extractor config, and the decoder's alphabet and unigram set."""
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
        self.assertIsInstance(processor.decoder ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Any:
        """from_pretrained forwards alpha/beta/score_boundary/unk_score_offset
        overrides to the loaded language model."""
        __lowercase = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha ,5.0 )
        self.assertEqual(processor.language_model.beta ,3.0 )
        self.assertEqual(processor.language_model.score_boundary ,-7.0 )
        self.assertEqual(processor.language_model.unk_score_offset ,3 )
    def _UpperCAmelCase (self ) -> Dict:
        """Constructing the processor raises when the tokenizer has tokens the
        decoder's alphabet does not include."""
        __lowercase = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
    def _UpperCAmelCase (self ) -> Optional[int]:
        """Calling the processor on audio matches calling the feature extractor directly."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = floats_list((3, 1000) )
        __lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
        __lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Calling the processor on text matches calling the tokenizer directly."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = '''This is a test string'''
        __lowercase = processor(text=_lowerCamelCase )
        __lowercase = tokenizer(_lowerCamelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
        """Deterministic dummy logits of the given shape (seeded numpy RNG)."""
        np.random.seed(_lowerCamelCase )
        return np.random.rand(*_lowerCamelCase )
    def _UpperCAmelCase (self ) -> str:
        """processor.decode matches decoder.decode_beams on text, logit score and LM score."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
        __lowercase = processor.decode(_lowerCamelCase )
        __lowercase = decoder.decode_beams(_lowerCamelCase )[0]
        self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
        """processor.batch_decode (optionally with a user-supplied pool) matches
        decoder.decode_beams_batch; parameterized over pool start methods."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            __lowercase = processor.batch_decode(_lowerCamelCase )
        else:
            with get_context(_lowerCamelCase ).Pool() as pool:
                __lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
        __lowercase = list(_lowerCamelCase )
        with get_context('''fork''' ).Pool() as p:
            __lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
        __lowercase , __lowercase , __lowercase = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
        self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
    def _UpperCAmelCase (self ) -> Any:
        """batch_decode forwards beam_width/beam_prune_logp/token_min_logp to the decoder."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 15
        __lowercase = -2_0.0
        __lowercase = -4.0
        __lowercase = processor.batch_decode(
            _lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
        __lowercase = decoded_processor_out.text
        __lowercase = list(_lowerCamelCase )
        with get_context('''fork''' ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
                _lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        __lowercase = [d[0][2] for d in decoded_decoder_out]
        __lowercase = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
        self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
        self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
    def _UpperCAmelCase (self ) -> Union[str, Any]:
        """batch_decode forwards alpha/beta/unk_score_offset/lm_score_boundary
        to the decoder's LM and resets its parameters."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 2.0
        __lowercase = 5.0
        __lowercase = -2_0.0
        __lowercase = True
        __lowercase = processor.batch_decode(
            _lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
        __lowercase = decoded_processor_out.text
        __lowercase = list(_lowerCamelCase )
        decoder.reset_params(
            alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
        with get_context('''fork''' ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
                _lowerCamelCase ,_lowerCamelCase ,)
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha ,2.0 )
        self.assertEqual(lm_model.beta ,5.0 )
        self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
        self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Optional[int]:
        """from_pretrained downloads only decoder-relevant files from the hub repo."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        __lowercase = os.listdir(_lowerCamelCase )
        __lowercase = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> List[Any]:
        """Loading from a local snapshot yields the same decoder files as the snapshot dir."""
        __lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        __lowercase = os.listdir(_lowerCamelCase )
        __lowercase = os.listdir(_lowerCamelCase )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
    def _UpperCAmelCase (self ) -> Dict:
        """AutoProcessor resolves to the same processor: identical features and decodes."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = floats_list((3, 1000) )
        __lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
        __lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
        __lowercase = processor_auto.batch_decode(_lowerCamelCase )
        self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
    def _UpperCAmelCase (self ) -> Dict:
        """The processor exposes the feature extractor's model_input_names."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        self.assertListEqual(
            processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
        """Collect the value of `key` from each offset dict in `offsets`."""
        __lowercase = [d[key] for d in offsets]
        return retrieved_list
    def _UpperCAmelCase (self ) -> Optional[int]:
        """decode with output_word_offsets=True returns per-word offsets consistent with the text."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = self._get_dummy_logits()[0]
        __lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """batch_decode with output_word_offsets=True: offsets per sample match the texts."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
    @slow
    @require_torch
    @require_torchaudio
    def _UpperCAmelCase (self ) -> List[Any]:
        """Integration test: word offsets from a real model on a common_voice sample
        convert to word start/end times matching precomputed references."""
        import torch
        __lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
        __lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
        __lowercase = iter(_lowerCamelCase )
        __lowercase = next(_lowerCamelCase )
        __lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        __lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        __lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
        with torch.no_grad():
            __lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
        __lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
        __lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        __lowercase = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        __lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
        self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
        # output times
        __lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
        __lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
        # fmt: off
        __lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        __lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
        self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 0
|
'''simple docstring'''
from maths.prime_check import is_prime
def _lowerCAmelCase ( number : int ):
    """Return the twin prime of ``number`` (i.e. ``number + 2``) when both
    ``number`` and ``number + 2`` are prime; otherwise return -1.

    Fix: the original parameter (``lowerCamelCase_``) was never referenced --
    the body used the unbound names ``number`` and ``_lowercase``, so every
    call raised NameError. Names are now consistent.

    Raises:
        TypeError: if ``number`` is not an int.
    """
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    # is_prime comes from maths.prime_check (imported at module level).
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    import doctest
    # Run this module's doctests when executed directly.
    doctest.testmod()
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
    """Image processor that rounds image height/width down to the nearest
    multiple of ``size_divisor`` and rescales pixel values by 1/255.

    Reconstruction notes: the original block was syntactically invalid
    (duplicate ``_lowerCamelCase`` parameters) and its assignment targets were
    mangled to ``__lowercase``. Attribute and method names were restored from
    how they are referenced in this class (``self.do_resize``, ``self.resize``,
    ``self.rescale``, ...); the undefined base ``lowerCAmelCase__`` was replaced
    with ``BaseImageProcessor``, which is imported at the top of this file.
    """

    # Name of the single model input this processor produces.
    model_input_names = ["pixel_values"]

    def __init__(self ,do_resize = True ,size_divisor = 32 ,resample=PILImageResampling.BILINEAR ,do_rescale = True ,**kwargs ,) -> None:
        """Store per-instance defaults used by ``preprocess`` when its own
        arguments are left as None."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize(self ,image ,size_divisor ,resample ,data_format = None ,**kwargs ) -> np.ndarray:
        """Resize ``image`` so both dimensions are multiples of ``size_divisor``
        (rounded down), delegating to the module-level ``resize`` transform."""
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        return resize(image ,(new_h, new_w) ,resample=resample ,data_format=data_format ,**kwargs )

    def rescale(self ,image ,scale ,data_format = None ,**kwargs ) -> np.ndarray:
        """Multiply pixel values by ``scale`` via the module-level ``rescale``."""
        return rescale(image=image ,scale=scale ,data_format=data_format ,**kwargs )

    def preprocess(self ,images ,do_resize = None ,size_divisor = None ,resample=None ,do_rescale = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> BatchFeature:
        """Run the configured transforms over one image or a list of images and
        return a ``BatchFeature`` with key ``pixel_values``.

        Raises:
            ValueError: if resizing is requested without a ``size_divisor``,
                or if the input images are not valid.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image ,size_divisor=size_divisor ,resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image ,scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( nums : list[int] , target : int ) -> list[int]:
    """Two-pointer two-sum over a *sorted* list: return the pair of indices
    whose values sum to ``target``, or ``[]`` when no such pair exists.

    Fix: the original def had two parameters both named ``lowerCamelCase_``
    (a SyntaxError) while the body used ``nums`` and ``target``; names restored.
    NOTE(review): correctness relies on ``nums`` being sorted ascending --
    the demo below passes a sorted list; confirm callers do too.
    """
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # Sum too small: advance the left pointer to a larger value.
            i = i + 1
        else:
            # Sum too large: retreat the right pointer to a smaller value.
            j = j - 1
    return []


# Public alias: the __main__ demo below calls the function as ``two_pointer``.
two_pointer = _lowerCAmelCase
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: print the indices of the pair in the sorted list summing to 9.
    print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 710
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): the four assignments below all rebind the same mangled name
# ``_SCREAMING_SNAKE_CASE``; later code reads ``HEURISTIC``, ``grid``,
# ``delta`` and ``TPosition`` -- these were presumably the original targets
# (in that order). Verify against the upstream module.
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
# Type alias for a (y, x) grid position.
_SCREAMING_SNAKE_CASE = tuple[int, int]
class Node:
    """A single A* search node on the grid.

    Fixes vs the mangled original: ``__init__`` had six parameters all named
    ``_lowerCamelCase`` (a SyntaxError), the heuristic method was renamed
    ``_UpperCAmelCase`` although ``__init__`` calls ``self.calculate_heuristic()``,
    and the class itself is instantiated elsewhere as ``Node`` -- all original
    names restored from those references.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x), matching the grid's row-major indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance to the goal when HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Sorting open nodes ascending by f = g + h picks the most promising first.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``Node`` and the
    ``delta`` move set.

    Fixes vs the mangled original: duplicate ``__init__`` parameter names
    (a SyntaxError), three methods all named ``_UpperCAmelCase``, and lost
    assignment targets -- names restored from how they are referenced
    (``self.open_nodes``, ``self.get_successors``, ``a_star.search()``, ...).
    """

    def __init__(self, start, goal) -> None:
        # start/goal are (y, x); Node takes (pos_x, pos_y, goal_x, goal_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand the cheapest open node until the target is reached; return
        the path, or ``[self.start.pos]`` when no path exists."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (ascending f-cost).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            # NOTE(review): goal args are passed as (target.pos_y, target.pos_x),
            # preserved from the original -- looks swapped vs Node's
            # (goal_x, goal_y) signature; confirm against upstream before fixing.
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,
                )
            )
        return successors

    def retrace_path(self, node) -> list[TPosition]:
        """Walk parent links from ``node`` back to the start; return start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: runs one ``AStar`` forward (start→goal) and one
    backward (goal→start), alternately expanding each frontier until they meet.

    Fixes vs the mangled original: duplicate ``__init__`` parameter names
    (a SyntaxError) and lost local/assignment names, restored from references
    such as ``self.fwd_astar`` and ``self.retrace_bidirectional_path``.
    """

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand both frontiers until they share a position; return the joined
        path, or ``[self.fwd_astar.start.pos]`` when no path exists."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Retarget each search at the other's current frontier node so the
            # heuristics pull the two frontiers toward each other.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> list[TPosition]:
        """Join forward path (start→meet) and reversed backward path (meet→goal)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # NOTE(review): every assignment below rebinds the mangled name
    # ``_SCREAMING_SNAKE_CASE`` while later lines read ``init``, ``goal``,
    # ``a_star``, ``start_time``, ``end_time``, ``bd_start_time`` and
    # ``bd_end_time`` -- those were presumably the original targets, and the
    # ``bd_a_star.search()`` call appears to have been lost in the mangling.
    _SCREAMING_SNAKE_CASE = (0, 0)
    _SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    _SCREAMING_SNAKE_CASE = time.time()
    _SCREAMING_SNAKE_CASE = AStar(init, goal)
    _SCREAMING_SNAKE_CASE = a_star.search()
    _SCREAMING_SNAKE_CASE = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')
    _SCREAMING_SNAKE_CASE = time.time()
    _SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
    _SCREAMING_SNAKE_CASE = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 0
|
from __future__ import annotations
def slowsort(sequence, start=None, end=None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using the (deliberately
    inefficient) slowsort algorithm; returns None.

    Fixes vs the mangled original: the def had three parameters all named
    ``lowerCamelCase_`` (a SyntaxError) while its body recursed via
    ``slowsort(sequence, start, end)`` and the swap's assignment targets were
    lost -- names restored from those references.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Recursively place the maximum of each half at its end...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # ...then move the larger of the two maxima to position `end`...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    # ...and sort the rest.
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
    from doctest import testmod
    # Run this module's doctests when executed directly.
    testmod()
| 711
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSatForSequenceClassification and copy the s3prl
    downstream head weights into it.

    Fix: the def was named ``_lowerCAmelCase`` although the dispatcher below
    calls ``convert_classification``; the assignment targets for the copied
    weights were lost in mangling and are restored from the s3prl head layout
    (projector + post_net linear classifier).
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSatForAudioFrameClassification and copy the s3prl
    downstream linear classifier weights into it.

    Fix: def renamed from ``_lowerCAmelCase`` to match the dispatcher's call;
    lost assignment targets restored (single linear classifier head).
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSatForXVector and copy the s3prl x-vector head weights:
    connector, TDNN kernels, utterance-level linears, and the AMSoftmax weight.

    Fix: def renamed from ``_lowerCAmelCase`` to match the dispatcher's call;
    lost assignment targets restored from the downstream checkpoint keys.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl downstream checkpoint into a HF UniSpeechSat model +
    feature extractor and save both to ``model_dump_path``.

    Dispatches on ``hf_config.architectures[0]`` to the head-specific
    converter. Fix: the def was named ``_lowerCAmelCase`` although the CLI
    entry point below calls ``convert_saprl_checkpoint``; mangled local names
    restored. NOTE(review): ``return_attention_mask=True`` /
    ``do_normalize=False`` follow the standard s3prl conversion scripts --
    the original keyword values were lost; confirm upstream.
    """
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification'''):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForAudioFrameClassification'''):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForXVector'''):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # NOTE(review): the parser is bound to the mangled name
    # ``_SCREAMING_SNAKE_CASE`` twice while the add_argument calls read
    # ``parser`` and the final call reads ``args`` -- presumably the original
    # targets were ``parser`` and ``args``; verify against upstream.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 0
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Return the number of reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit.

    Fix: the def was named ``_lowerCAmelCase`` although the __main__ block
    calls ``solution()``, and the parameter name did not match the ``limit``
    used in the body; names restored.
    """
    # Sieve of Eratosthenes over odd numbers (2 added explicitly).
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # phi[n] starts at n; multiplying by (1 - 1/p) for each prime factor p
    # yields Euler's totient.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    # Prints the result for the default limit (1_000_000).
    print(f'''{solution() = }''')
| 712
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# NOTE(review): the four assignments below all rebind the mangled name
# ``_SCREAMING_SNAKE_CASE``; in the upstream datasets CLI these are the
# conflict-marker prefix/suffix strings, the list of TFDS names that need
# manual attention, and the (pattern, replacement) rewrite table -- verify
# the original names before relying on them.
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( args : Namespace ):
    """Factory for the datasets-cli ``convert`` command: build a ConvertCommand
    from the parsed CLI namespace.

    Fix: the original parameter (``lowerCamelCase_``) was never used -- the
    body read the unbound name ``args``, so every call raised NameError.
    NOTE(review): ``ConvertCommand`` is not defined under that name in this
    mangled file (the class below is named ``__lowercase``); verify upstream.
    """
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
    """CLI command that converts a TensorFlow Datasets script into a
    HuggingFace Datasets script.

    NOTE(review): this block is machine-garbled — many locals are bound to the
    throwaway name `__lowercase` while the code below reads conventional names
    (`train_parser`, `file_names`, `out_lines`, ...), the `__init__` signature
    reuses one parameter name (a SyntaxError), and the base class name is
    undefined. Documented only; a behavior-preserving rewrite is not safe here.
    """
    @staticmethod
    def _UpperCAmelCase (_lowerCamelCase ) -> Any:
        """Register the `convert` sub-command and its arguments.

        NOTE(review): presumably `_lowerCamelCase` is the sub-parser registry,
        read below as `parser`/`train_parser` — confirm against upstream.
        """
        __lowercase = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=_lowerCamelCase )
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
        """Store logger and source/destination paths.

        NOTE(review): duplicate parameter names make this a SyntaxError as
        written; the bodies read `tfds_path` / `datasets_directory`, and the
        `run` method below expects attributes `_logger`, `_tfds_path`,
        `_datasets_directory` to have been set here.
        """
        __lowercase = get_logger('''datasets-cli/converting''' )
        __lowercase = tfds_path
        __lowercase = datasets_directory
    def _UpperCAmelCase (self ) -> str:
        """Convert every eligible .py file under `--tfds_path`, writing the
        rewritten scripts into `--datasets_directory` and copying shared
        utility files next to the dataset builders that import them."""
        # Resolve the source: a directory of scripts or a single script file.
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        __lowercase = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        # utils_files: non-builder helpers to copy later; with_manual_update:
        # outputs containing TO_HIGHLIGHT patterns; imports_to_builder_map:
        # helper-module name -> destination builder directory.
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.listdir(_lowerCamelCase )
        else:
            __lowercase = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}" )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
            # Skip package markers, tests and non-Python files.
            if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
                __lowercase = f.readlines()
            __lowercase = []
            __lowercase = False
            __lowercase = False
            __lowercase = []
            # Rewrite the file line by line.
            for line in lines:
                __lowercase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __lowercase = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    __lowercase = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    __lowercase = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    __lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Lines mentioning encoder/config helpers need a human look:
                    # wrap them in conflict-style markers for later review.
                    __lowercase = True
                    __lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
                    out_lines.append(_lowerCamelCase )
                    out_lines.append(_lowerCamelCase )
                    continue
                else:
                    # Apply the ordered tfds -> datasets regex rewrites.
                    for pattern, replacement in TO_CONVERT:
                        __lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        __lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        __lowercase = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __lowercase = True
                out_lines.append(_lowerCamelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __lowercase = f_name.replace('''.py''' ,'''''' )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                __lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
                os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(_lowerCamelCase )
            if needs_manual_update:
                with_manual_update.append(_lowerCamelCase )
            with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(_lowerCamelCase )
            self._logger.info(f"Converted in {output_file}" )
        # Copy each utility file into the builder directory that imports it.
        for utils_file in utils_files:
            try:
                __lowercase = os.path.basename(_lowerCamelCase )
                __lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f"Moving {dest_folder} to {utils_file}" )
                shutil.copy(_lowerCamelCase ,_lowerCamelCase )
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    """Build a `SwinConfig` for the given SimMIM model name ("base" or "large").

    NOTE(review): the garbled original read an undefined `model_name` and bound
    every value to a throwaway local, so the config fields were never set.
    """
    model_name = lowerCamelCase_
    config = SwinConfig(image_size=1_9_2 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    elif "large" in model_name:
        window_size = 1_2
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
if "encoder.mask_token" in name:
__lowercase = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__lowercase = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__lowercase = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__lowercase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__lowercase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__lowercase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__lowercase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__lowercase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__lowercase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__lowercase = '''layernorm.weight'''
if name == "encoder.norm.bias":
__lowercase = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__lowercase = '''swin.''' + name
return name
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(__UpperCamelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
__lowercase = key.split('''.''' )
__lowercase = int(key_split[2] )
__lowercase = int(key_split[4] )
__lowercase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[
dim : dim * 2, :
]
__lowercase = val[-dim:, :]
else:
__lowercase = val[
:dim
]
__lowercase = val[
dim : dim * 2
]
__lowercase = val[
-dim:
]
else:
__lowercase = val
return orig_state_dict
def _lowerCAmelCase ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    """Convert an original SimMIM Swin checkpoint to HF format, sanity-check it
    on a COCO image, then optionally save and/or push to the hub.

    NOTE(review): the original signature reused one parameter name four times
    (a SyntaxError) and read the undefined `__UpperCamelCase` everywhere.
    `get_swin_config` / `convert_state_dict` are the helpers defined above
    (their names are garbled to `_lowerCAmelCase` in this dump).
    """
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        # NOTE(review): `.keys()` on a logits tensor looks wrong in the original;
        # kept as-is apart from the name fixes — confirm against upstream.
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub" )
        model.push_to_hub(f"microsoft/{model_name}" )
        image_processor.push_to_hub(f"microsoft/{model_name}" )
if __name__ == "__main__":
    # NOTE(review): garbled — the parser is bound to `_SCREAMING_SNAKE_CASE`
    # but the calls below read `parser`, and the final call reads `args` /
    # `convert_swin_checkpoint`, none of which exist in this module as written.
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''swin-base-simmim-window6-192''',
        type=str,
        choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
        help='''Name of the Swin SimMIM model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--checkpoint_path''',
        default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
        type=str,
        help='''Path to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 713
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# NOTE(review): the garbled original bound all three values to
# `_SCREAMING_SNAKE_CASE`, so `MODEL_CONFIG_CLASSES` was undefined on the next
# line (NameError at import) and `logger` (read by main below) never existed.
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
_SCREAMING_SNAKE_CASE = MODEL_TYPES  # keep the original binding alive
@dataclass
class __lowercase :
    """Arguments controlling which model/config/tokenizer to fine-tune or train.

    NOTE(review): garbled — every field is named `a` (later fields overwrite
    earlier ones) and `lowerCAmelCase__` defaults are undefined. Upstream these
    are presumably model_name_or_path, model_type, config_name, tokenizer_name
    and cache_dir, each defaulting to None — confirm against the original script.
    """
    # model_name_or_path (upstream): checkpoint to initialize from, or None.
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    # model_type (upstream): architecture key when training from scratch.
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
    # config_name (upstream): config to load when it differs from the model.
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    # tokenizer_name (upstream).
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    # cache_dir (upstream): download cache location.
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
    """Arguments describing the training/eval data and the LM objective.

    NOTE(review): garbled — every field is named `a` (later fields overwrite
    earlier ones) and `lowerCAmelCase__` defaults are undefined; the metadata
    help strings indicate the upstream field names (train_data_file,
    train_data_files, eval_data_file, train_ref_file, eval_ref_file,
    line_by_line, mlm, whole_word_mask, mlm_probability, plm_probability,
    max_span_length, block_size, overwrite_cache) — confirm against upstream.
    """
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    a : Optional[str] = field(
        default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    a : bool = field(
        default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    a : bool = field(
        default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
    # Masking ratio for the MLM objective.
    a : float = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    # Span/context ratio for XLNet-style permutation LM.
    a : float = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    a : int = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    # -1 means "use the tokenizer's model max length".
    a : int = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    a : bool = field(
        default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
    """Build the train or eval dataset described by `args`.

    NOTE(review): the garbled original reused one parameter name four times (a
    SyntaxError) while the body read `args`/`evaluate`/`tokenizer`, and it
    built the ConcatDataset from the parameter instead of the glob item `f`.
    """
    def _dataset(file_path , ref_path=None ):
        # Build one dataset for a single file (ref_path enables Chinese
        # whole-word masking via a reference file).
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        # Multiple training files given as a glob pattern.
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
    """Train and/or evaluate a language model (CLM/MLM/PLM); returns the dict
    of evaluation results (perplexity when --do_eval is set).

    NOTE(review): the garbled original bound every value to `__lowercase`
    while reading the conventional names (`model_args`, `config`, `trainer`,
    ...); those names are restored throughout.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it,and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            '''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # Pick the collator matching the LM objective.
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
    return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    """Entry point for torch_xla's xla_spawn (TPUs); the index argument is unused.

    NOTE(review): `main` is not defined in this garbled module — upstream the
    training entry point (the long function above) is named `main`.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 56
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCAmelCase ( config_name , save_dir , **config_kwargs ):
    """Create a randomly initialized seq2seq model from `config_name` (with
    optional config overrides) and save model + tokenizer to `save_dir`.

    NOTE(review): the original signature reused `lowerCamelCase_` for all three
    parameters (a SyntaxError); meaningful names restored. The call name
    `AutoModelForSeqaSeqLM` matches this module's (garbled) import.
    """
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
    # NOTE(review): the original passed the undefined name
    # `save_randomly_initialized_version`; the function above is `_lowerCAmelCase`.
    fire.Fire(_lowerCAmelCase)
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazily-loaded import structure for the VAN model.
# NOTE(review): the garbled original bound everything to `_SCREAMING_SNAKE_CASE`,
# so `_import_structure` was undefined when passed to `_LazyModule` below
# (NameError at import); conventional names and the standard sys.modules
# replacement are restored.
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure['''modeling_van'''] = [
        '''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VanForImageClassification''',
        '''VanModel''',
        '''VanPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_SCREAMING_SNAKE_CASE = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowercase ( nn.Module ):
    """Image encoder: ResNet-152 trunk (head removed) + adaptive pooling down
    to `num_image_embeds` spatial positions, emitting (B, N, 2048) features."""
    def __init__(self ,_lowerCamelCase ) -> None:
        """`_lowerCamelCase` is the argparse namespace; only `num_image_embeds`
        is read here.

        NOTE(review): the garbled original called `torchvision.models.resnetaaa`
        / `nn.AdaptiveAvgPoolad` and passed the undefined `__lowercase` as
        `pretrained`; upstream uses resnet152 with pretrained=True.
        """
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]  # drop avgpool + fc head
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[_lowerCamelCase.num_image_embeds] )
    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
        """Encode a batch of images to (B, N, 2048)."""
        out = self.pool(self.model(_lowerCamelCase ) )
        out = torch.flatten(out ,start_dim=2 )
        out = out.transpose(1 ,2 ).contiguous()
        return out  # BxNx2048
class __lowercase ( lowercase__ ):
    """Dataset over a JSON-lines file of {text, label, img} records for
    multimodal (text + image) classification.

    NOTE(review): machine-garbled — `__init__` reuses one parameter name five
    times (a SyntaxError) and the locals below never reach the attribute names
    (`self.data`, `self.tokenizer`, ...) that the other methods read.
    Documented only; a behavior-preserving rewrite is not safe here.
    """
    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        """Presumably (data_path, tokenizer, transforms, labels,
        max_seq_length) upstream — confirm against the original script."""
        __lowercase = [json.loads(__lowercase ) for l in open(__lowercase )]
        __lowercase = os.path.dirname(__lowercase )
        __lowercase = tokenizer
        __lowercase = labels
        __lowercase = len(__lowercase )
        __lowercase = max_seq_length
        __lowercase = transforms
    def __len__(self ) -> Tuple:
        """Number of JSONL records."""
        return len(self.data )
    def __getitem__(self ,_lowerCamelCase ) -> Union[str, Any]:
        """Return one example: token ids (with image start/end tokens split
        off), a multi-hot label vector, and the transformed RGB image.

        NOTE(review): the multi-hot label fill is garbled (`__lowercase = 1`);
        upstream it sets the indices of the record's genres to 1.
        """
        __lowercase = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] ,add_special_tokens=__lowercase ) )
        __lowercase , __lowercase , __lowercase = sentence[0], sentence[1:-1], sentence[-1]
        __lowercase = sentence[: self.max_seq_length]
        __lowercase = torch.zeros(self.n_classes )
        __lowercase = 1
        __lowercase = Image.open(os.path.join(self.data_dir ,self.data[index]['''img'''] ) ).convert('''RGB''' )
        __lowercase = self.transforms(__lowercase )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def _UpperCAmelCase (self ) -> int:
        """Count how often each label occurs across the whole dataset."""
        __lowercase = Counter()
        for row in self.data:
            label_freqs.update(row['''label'''] )
        return label_freqs
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = [len(row['''sentence'''] ) for row in batch]
__lowercase , __lowercase = len(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
__lowercase = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.long )
__lowercase = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
__lowercase = input_row['''sentence''']
__lowercase = 1
__lowercase = torch.stack([row['''image'''] for row in batch] )
__lowercase = torch.stack([row['''label'''] for row in batch] )
__lowercase = torch.stack([row['''image_start_token'''] for row in batch] )
__lowercase = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _lowerCAmelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _lowerCAmelCase ( ):
    """Build the image preprocessing pipeline: resize, center-crop, tensorize,
    then normalize with the dataset's channel statistics."""
    channel_mean = [0.46777044, 0.44531429, 0.40661017]
    channel_std = [0.12221994, 0.12145835, 0.14380469]
    steps = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=channel_mean, std=channel_std),
    ]
    return transforms.Compose(steps)
| 715
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
    """Deprecated alias kept for backward compatibility.

    NOTE(review): the base name `lowerCAmelCase__` is garbled; upstream this
    class derives from `GLPNImageProcessor` (imported above).
    """
    def __init__(self , *args , **kwargs ) -> None:
        """Emit a deprecation warning, then defer to the parent processor.

        NOTE(review): the garbled original reused one name for *args/**kwargs
        (a SyntaxError) and passed an undefined name as the warning category;
        FutureWarning is the conventional category for deprecations.
        """
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# NOTE(review): the trainer class below reads `logger` and `arg_to_scheduler`,
# but the garbled original bound both values to `_SCREAMING_SNAKE_CASE`;
# conventional names restored.
logger = logging.get_logger(__name__)
# Maps the --lr_scheduler choice to its transformers schedule factory.
arg_to_scheduler = {
    """linear""": get_linear_schedule_with_warmup,
    """cosine""": get_cosine_schedule_with_warmup,
    """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
    """polynomial""": get_polynomial_decay_schedule_with_warmup,
    """constant""": get_constant_schedule,
    """constant_w_warmup""": get_constant_schedule_with_warmup,
}
_SCREAMING_SNAKE_CASE: Any = arg_to_scheduler  # keep the original binding alive
class __lowercase ( __snake_case ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(*A_ ,**A_ )
if config is None:
assert isinstance(self.model ,A_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
__lowercase = self.model.config
else:
__lowercase = config
__lowercase = data_args
__lowercase = self.config.tgt_vocab_size if isinstance(self.config ,A_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
''' padding..''' )
if self.args.label_smoothing == 0:
__lowercase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__lowercase = label_smoothed_nll_loss
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.optimizer is None:
__lowercase = ["bias", "LayerNorm.weight"]
__lowercase = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
__lowercase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__lowercase = Adafactor
__lowercase = {"scale_parameter": False, "relative_step": False}
else:
__lowercase = AdamW
__lowercase = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
__lowercase = self.args.learning_rate
if self.sharded_ddp:
__lowercase = OSS(
params=A_ ,optim=A_ ,**A_ ,)
else:
__lowercase = optimizer_cls(A_ ,**A_ )
if self.lr_scheduler is None:
__lowercase = self._get_lr_scheduler(A_ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__lowercase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__lowercase = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
__lowercase = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=A_ )
return scheduler
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _UpperCAmelCase(self, model, inputs, labels):
    """Compute the (optionally label-smoothed) loss for a seq2seq batch.

    Returns ``(loss, logits)``.

    Fixes: the degraded signature repeated one parameter name three times
    (a SyntaxError), every call passed the undefined ``A_``, and the tuple
    assignment targets (``loss``, ``logits``) were collapsed into one
    throwaway name.
    """
    if self.args.label_smoothing == 0:
        if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
            # Force training to ignore the pad token.
            logits = model(**inputs, use_cache=False)[0]
            loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
        else:
            # Compute the usual loss via the model itself.
            loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
    else:
        # Compute the label-smoothed loss from log-probabilities.
        logits = model(**inputs, use_cache=False)[0]
        lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
        loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
    return loss, logits
def _UpperCAmelCase(self, model, inputs):
    """Trainer hook: pop the labels out of ``inputs``, delegate to
    ``_compute_loss`` and return only the scalar loss.

    Fixes: duplicate parameter names (SyntaxError) and calls that passed the
    undefined ``A_``; the ``(loss, logits)`` pair was collapsed into a single
    throwaway name while ``loss`` was returned undefined.
    """
    labels = inputs.pop("labels")
    loss, _ = self._compute_loss(model, inputs, labels)
    return loss
def _UpperCAmelCase(self, model, inputs, prediction_loss_only, ignore_keys=None):
    """Seq2seq prediction step.

    Optionally generates tokens, computes the loss without gradients, and
    pads generated tokens / labels up to ``max_length`` so batches can be
    concatenated. Returns ``(loss, logits_or_generated_tokens, labels)`` or
    ``(loss, None, None)`` when only the loss is requested.

    Fixes: duplicate parameter names (SyntaxError), undefined ``A_`` call
    arguments, and collapsed assignment targets in the degraded body.
    """
    inputs = self._prepare_inputs(inputs)
    gen_kwargs = {
        "max_length": self.data_args.val_max_target_length
        if self.data_args is not None
        else self.config.max_length,
        "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
    }
    if self.args.predict_with_generate and not self.args.prediction_loss_only:
        generated_tokens = self.model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            **gen_kwargs,
        )
        # In case the batch is shorter than max length, the output should be padded.
        if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
    labels = inputs.pop("labels")
    with torch.no_grad():
        # Compute the loss on predict data.
        loss, logits = self._compute_loss(model, inputs, labels)
    loss = loss.mean().detach()
    if self.args.prediction_loss_only:
        return (loss, None, None)
    logits = generated_tokens if self.args.predict_with_generate else logits
    if labels.shape[-1] < gen_kwargs["max_length"]:
        labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
    return (loss, logits, labels)
def _UpperCAmelCase(self, tensor, max_length):
    """Right-pad ``tensor`` (batch, seq) with the pad token up to
    ``max_length`` columns and return the padded copy.

    Raises ValueError when neither ``config.pad_token_id`` nor
    ``config.eos_token_id`` is set.

    Fixes: duplicate parameter names (SyntaxError) and a dropped slice
    assignment — the original values were never copied into the padded
    buffer.
    """
    pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
    if pad_token_id is None:
        raise ValueError(
            "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
            f" padded to `max_length`={max_length}"
        )
    padded_tensor = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    # Copy the original values into the left part of the padded buffer.
    padded_tensor[:, : tensor.shape[-1]] = tensor
    return padded_tensor
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase:
    """Undirected weighted graph with Borůvka's minimum-spanning-tree
    algorithm.

    Fixes: in the degraded source every method shared the same mangled name
    (so only the last definition survived), the assignment targets in
    ``union``/``boruvka`` were lost, and the total MST weight was only
    printed, never returned.
    """

    def __init__(self, num_of_nodes) -> None:
        """Create a graph with ``num_of_nodes`` nodes and no edges."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []  # list of [u, v, weight]
        self.m_component = {}  # node -> component representative

    def add_edge(self, u_node, v_node, weight) -> None:
        """Add an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        """Follow parent links until the component representative is found."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """Path-compress: point every node directly at its representative."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the components of u_node and v_node, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> int:
        """Run Borůvka's algorithm, print each added edge, and return the
        total weight of the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # Initially every node is its own component of size 1.
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # For each component, find its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Add each component's cheapest edge to the MST and merge.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
        return mst_weight
def _lowerCAmelCase():
    # Intentionally empty placeholder; the doctest run below is the real
    # work when this module is executed as a script.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 56
| 0
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Fixed: the flag had been renamed to a mangled constant while the guards
# below still reference `is_python_no_less_than_3_10`, causing NameErrors.
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def _lowerCAmelCase(default=None, metadata=None):
    """Dataclass helper: declare a list-valued field defaulting to *default*.

    A fresh copy of *default* is produced per instance via ``default_factory``,
    avoiding the shared-mutable-default pitfall.

    Fixes: the degraded signature used the same name for both parameters,
    which is a SyntaxError.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class __lowercase:
    """Plain dataclass with four required fields.

    NOTE(review): every field is annotated under the same name ``a``, so the
    later annotations overwrite the earlier ones and only one field survives.
    The names appear mangled (presumably distinct originally) — confirm
    against the parser tests below.
    """

    a : int
    a : float
    a : str
    a : bool
@dataclass
class __lowercase:
    """Dataclass with defaulted int/str fields.

    NOTE(review): both fields reuse the name ``a``; only the last annotation
    survives. Field names appear mangled.
    """

    a : int = 42
    a : str = field(default="toto", metadata={"help": "help message"})
@dataclass
class __lowercase:
    """Dataclass of boolean flags (used by the default-bool parser test).

    NOTE(review): all three fields reuse the name ``a``; names appear mangled.
    """

    a : bool = False
    a : bool = True
    a : Optional[bool] = None
class __lowercase(UpperCamelCase__):
    """Enum-like class with two string members.

    NOTE(review): the base-class name ``UpperCamelCase__`` is unresolved in
    this file — presumably an ``Enum`` subclass; confirm upstream.
    """

    a : Optional[int] = """titi"""
    a : int = """toto"""
class __lowercase(UpperCamelCase__):
    """Enum-like class mixing string and int members (titi/toto/42).

    NOTE(review): the base-class name ``UpperCamelCase__`` is unresolved in
    this file — presumably an ``Enum`` subclass; confirm upstream.
    """

    a : Union[str, Any] = """titi"""
    a : Any = """toto"""
    a : Union[str, Any] = 42
@dataclass
class __lowercase:
    """Dataclass whose single field defaults to the ``"toto"`` enum value."""

    a : BasicEnum = "toto"

    def _UpperCAmelCase(self) -> str:
        # __post_init__-style conversion: coerce the raw string into the enum.
        # NOTE(review): `self.foo` and `BasicEnum` do not exist under the
        # mangled names in this file, and the result is discarded rather than
        # stored back on the instance.
        __lowercase = BasicEnum(self.foo)
@dataclass
class __lowercase:
    """Dataclass whose single field defaults to the mixed-type enum's
    ``"toto"`` value."""

    a : MixedTypeEnum = "toto"

    def _UpperCAmelCase(self) -> Optional[Any]:
        # __post_init__-style conversion: coerce the raw value into the enum.
        # NOTE(review): `self.foo` is unresolved under the mangled names, and
        # the converted value is discarded rather than stored.
        __lowercase = MixedTypeEnum(self.foo)
@dataclass
class __lowercase:
    """Dataclass of optional scalars and lists (all default to None/empty).

    NOTE(review): all fields reuse the name ``a``; names appear mangled.
    """

    a : Optional[int] = None
    a : Optional[float] = field(default=UpperCamelCase__, metadata={"help": "help message"})
    a : Optional[str] = None
    a : Optional[List[str]] = list_field(default=[])
    a : Optional[List[int]] = list_field(default=[])
@dataclass
class __lowercase:
    """Dataclass of list-valued fields with and without defaults.

    NOTE(review): all fields reuse the name ``a``; names appear mangled.
    """

    a : List[int] = list_field(default=[])
    a : List[int] = list_field(default=[1, 2, 3])
    a : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    a : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __lowercase:
    """Dataclass whose fields are all required (bare ``field()`` — no
    defaults)."""

    a : List[int] = field()
    a : str = field()
    a : BasicEnum = field()

    def _UpperCAmelCase(self) -> Dict:
        # Coerce the parsed string into the enum after init.
        # NOTE(review): `self.required_enum` is unresolved under the mangled
        # field names, and the converted value is discarded.
        __lowercase = BasicEnum(self.required_enum)
@dataclass
class __lowercase:
    """Dataclass using *string* type annotations (forward references) to
    check that the parser resolves them like real annotations.

    NOTE(review): all fields reuse the name ``a``; names appear mangled.
    """

    a : int
    a : "BasicEnum" = field()
    a : "Optional[bool]" = None
    a : "str" = field(default="toto", metadata={"help": "help message"})
    a : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    # PEP 604 `X | Y` union syntax is only valid at class-definition time on
    # Python 3.10+, so these variants are defined conditionally.
    @dataclass
    class __lowercase:
        """3.10+ variant of the default-bool dataclass using ``bool | None``."""

        a : bool = False
        a : bool = True
        a : bool | None = None

    @dataclass
    class __lowercase:
        """3.10+ variant of the optionals dataclass using PEP 604 unions and
        builtin generics."""

        a : int | None = None
        a : float | None = field(default=UpperCamelCase__, metadata={"help": "help message"})
        a : str | None = None
        a : list[str] | None = list_field(default=[])
        a : list[int] | None = list_field(default=[])
class __lowercase(unittest.TestCase):
    """Tests for ``HfArgumentParser``: basic types, defaults, booleans,
    enums, literals, lists, optionals, required arguments and the
    dict/json/yaml parsing entry points.

    NOTE(review): throughout this class the identifiers were mangled —
    duplicate ``_lowerCamelCase`` parameters are a SyntaxError, repeated
    ``__lowercase`` assignments discard earlier results, and names such as
    ``parser``/``expected``/``example`` are referenced without being bound.
    The code is kept byte-for-byte; only documentation is added.
    """

    def _UpperCAmelCase(self, _lowerCamelCase, _lowerCamelCase) -> Any:
        """Assert that two argparse parsers declare equivalent actions."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            __lowercase = {k: v for k, v in vars(_lowerCamelCase).items() if k != '''container'''}
            __lowercase = {k: v for k, v in vars(_lowerCamelCase).items() if k != '''container'''}
            # Choices with mixed type have a custom function as "type",
            # so we need to compare results directly for equality.
            if xx.get('''choices''', _lowerCamelCase) and yy.get('''choices''', _lowerCamelCase):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](_lowerCamelCase), yy['''type'''](_lowerCamelCase))
                del xx["type"], yy["type"]
            self.assertEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> str:
        """Basic required int/float/str/bool arguments parse correctly."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=_lowerCamelCase, required=_lowerCamelCase)
        expected.add_argument('''--bar''', type=_lowerCamelCase, required=_lowerCamelCase)
        expected.add_argument('''--baz''', type=_lowerCamelCase, required=_lowerCamelCase)
        expected.add_argument('''--flag''', type=_lowerCamelCase, default=_lowerCamelCase, const=_lowerCamelCase, nargs='''?''')
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)
        __lowercase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        ((__lowercase), ) = parser.parse_args_into_dataclasses(_lowerCamelCase, look_for_args_file=_lowerCamelCase)
        self.assertFalse(example.flag)

    def _UpperCAmelCase(self) -> Dict:
        """Defaulted int/str fields become optional arguments with defaults."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--foo''', default=42, type=_lowerCamelCase)
        expected.add_argument('''--baz''', default='''toto''', type=_lowerCamelCase, help='''help message''')
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> Tuple:
        """Bool fields produce ``--flag`` / ``--no_flag`` pairs and accept
        explicit True/False string values."""
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=_lowerCamelCase, default=_lowerCamelCase, const=_lowerCamelCase, nargs='''?''')
        expected.add_argument('''--baz''', type=_lowerCamelCase, default=_lowerCamelCase, const=_lowerCamelCase, nargs='''?''')
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''', action='''store_false''', default=_lowerCamelCase, dest='''baz''')
        expected.add_argument('''--opt''', type=_lowerCamelCase, default=_lowerCamelCase)
        __lowercase = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_lowerCamelCase)
        for dataclass_type in dataclass_types:
            __lowercase = HfArgumentParser(_lowerCamelCase)
            self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)
            __lowercase = parser.parse_args([])
            self.assertEqual(_lowerCamelCase, Namespace(foo=_lowerCamelCase, baz=_lowerCamelCase, opt=_lowerCamelCase))
            __lowercase = parser.parse_args(['''--foo''', '''--no_baz'''])
            self.assertEqual(_lowerCamelCase, Namespace(foo=_lowerCamelCase, baz=_lowerCamelCase, opt=_lowerCamelCase))
            __lowercase = parser.parse_args(['''--foo''', '''--baz'''])
            self.assertEqual(_lowerCamelCase, Namespace(foo=_lowerCamelCase, baz=_lowerCamelCase, opt=_lowerCamelCase))
            __lowercase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''])
            self.assertEqual(_lowerCamelCase, Namespace(foo=_lowerCamelCase, baz=_lowerCamelCase, opt=_lowerCamelCase))
            __lowercase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''])
            self.assertEqual(_lowerCamelCase, Namespace(foo=_lowerCamelCase, baz=_lowerCamelCase, opt=_lowerCamelCase))

    def _UpperCAmelCase(self) -> Tuple:
        """Enum-typed fields expose the enum values as argparse choices."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''', default='''toto''', choices=['''titi''', '''toto''', 42], type=make_choice_type_function(['''titi''', '''toto''', 42]),)
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)
        __lowercase = parser.parse_args([])
        self.assertEqual(args.foo, '''toto''')
        __lowercase = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
        __lowercase = parser.parse_args(['''--foo''', '''titi'''])
        self.assertEqual(args.foo, '''titi''')
        __lowercase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
        __lowercase = parser.parse_args(['''--foo''', '''42'''])
        self.assertEqual(args.foo, 42)
        __lowercase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def _UpperCAmelCase(self) -> List[str]:
        """``Literal`` annotations behave like enums: fixed mixed-type
        choices."""
        @dataclass
        class __lowercase:
            """Dataclass with a single Literal-typed field."""

            a : Literal["titi", "toto", 42] = "toto"
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''', default='''toto''', choices=('''titi''', '''toto''', 42), type=make_choice_type_function(['''titi''', '''toto''', 42]),)
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)
        __lowercase = parser.parse_args([])
        self.assertEqual(args.foo, '''toto''')
        __lowercase = parser.parse_args(['''--foo''', '''titi'''])
        self.assertEqual(args.foo, '''titi''')
        __lowercase = parser.parse_args(['''--foo''', '''42'''])
        self.assertEqual(args.foo, 42)

    def _UpperCAmelCase(self) -> str:
        """List fields map to ``nargs='+'`` arguments with element types."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''', nargs='''+''', default=[], type=_lowerCamelCase)
        expected.add_argument('''--bar_int''', nargs='''+''', default=[1, 2, 3], type=_lowerCamelCase)
        expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=_lowerCamelCase)
        expected.add_argument('''--foo_float''', nargs='''+''', default=[0.1, 0.2, 0.3], type=_lowerCamelCase)
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)
        __lowercase = parser.parse_args([])
        self.assertEqual(
            _lowerCamelCase, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['''Hallo''', '''Bonjour''', '''Hello'''], foo_float=[0.1, 0.2, 0.3]),)
        __lowercase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split())
        self.assertEqual(_lowerCamelCase, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['''a''', '''b''', '''c'''], foo_float=[0.1, 0.7]))

    def _UpperCAmelCase(self) -> Optional[int]:
        """Optional fields default to None/empty and still parse values."""
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--foo''', default=_lowerCamelCase, type=_lowerCamelCase)
        expected.add_argument('''--bar''', default=_lowerCamelCase, type=_lowerCamelCase, help='''help message''')
        expected.add_argument('''--baz''', default=_lowerCamelCase, type=_lowerCamelCase)
        expected.add_argument('''--ces''', nargs='''+''', default=[], type=_lowerCamelCase)
        expected.add_argument('''--des''', nargs='''+''', default=[], type=_lowerCamelCase)
        __lowercase = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_lowerCamelCase)
        for dataclass_type in dataclass_types:
            __lowercase = HfArgumentParser(_lowerCamelCase)
            self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)
            __lowercase = parser.parse_args([])
            self.assertEqual(_lowerCamelCase, Namespace(foo=_lowerCamelCase, bar=_lowerCamelCase, baz=_lowerCamelCase, ces=[], des=[]))
            __lowercase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split())
            self.assertEqual(_lowerCamelCase, Namespace(foo=12, bar=3.1_4, baz='''42''', ces=['''a''', '''b''', '''c'''], des=[1, 2, 3]))

    def _UpperCAmelCase(self) -> Any:
        """Fields declared with bare ``field()`` become required arguments."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--required_list''', nargs='''+''', type=_lowerCamelCase, required=_lowerCamelCase)
        expected.add_argument('''--required_str''', type=_lowerCamelCase, required=_lowerCamelCase)
        expected.add_argument(
            '''--required_enum''', type=make_choice_type_function(['''titi''', '''toto''']), choices=['''titi''', '''toto'''], required=_lowerCamelCase,)
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> Union[str, Any]:
        """String (forward-reference) annotations resolve like real ones."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=_lowerCamelCase, required=_lowerCamelCase)
        expected.add_argument(
            '''--required_enum''', type=make_choice_type_function(['''titi''', '''toto''']), choices=['''titi''', '''toto'''], required=_lowerCamelCase,)
        expected.add_argument('''--opt''', type=_lowerCamelCase, default=_lowerCamelCase)
        expected.add_argument('''--baz''', default='''toto''', type=_lowerCamelCase, help='''help message''')
        expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=_lowerCamelCase)
        self.argparsersEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> Union[str, Any]:
        """``parse_dict`` builds the dataclass from a plain dict."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = {
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
        }
        __lowercase = parser.parse_dict(_lowerCamelCase)[0]
        __lowercase = BasicExample(**_lowerCamelCase)
        self.assertEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> List[Any]:
        """``parse_dict`` raises when extra keys are present and not allowed."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = {
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(_lowerCamelCase, parser.parse_dict, _lowerCamelCase, allow_extra_keys=_lowerCamelCase)

    def _UpperCAmelCase(self) -> List[str]:
        """Round-trip through a JSON file on disk.

        NOTE(review): the file is written as .json but read back through
        ``parse_yaml_file`` — presumably should be ``parse_json_file``
        (YAML is a JSON superset, so it still parses); confirm upstream.
        """
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = {
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __lowercase = os.path.join(_lowerCamelCase, '''temp_json''')
            os.mkdir(_lowerCamelCase)
            with open(temp_local_path + '''.json''', '''w+''') as f:
                json.dump(_lowerCamelCase, _lowerCamelCase)
            __lowercase = parser.parse_yaml_file(Path(temp_local_path + '''.json'''))[0]
            __lowercase = BasicExample(**_lowerCamelCase)
        self.assertEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> Tuple:
        """Round-trip through a YAML file on disk."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        __lowercase = {
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __lowercase = os.path.join(_lowerCamelCase, '''temp_yaml''')
            os.mkdir(_lowerCamelCase)
            with open(temp_local_path + '''.yaml''', '''w+''') as f:
                yaml.dump(_lowerCamelCase, _lowerCamelCase)
            __lowercase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml'''))[0]
            __lowercase = BasicExample(**_lowerCamelCase)
        self.assertEqual(_lowerCamelCase, _lowerCamelCase)

    def _UpperCAmelCase(self) -> Any:
        """``HfArgumentParser`` can be constructed for ``TrainingArguments``."""
        __lowercase = HfArgumentParser(_lowerCamelCase)
        self.assertIsNotNone(_lowerCamelCase)
| 717
|
'''simple docstring'''
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution.
    # 64x64 is the height/width of the dataset images, 3 the RGB channels;
    # (3, 3) is the kernel (filter matrix) size.
    # Fixed: the degraded layer names `ConvaD`/`MaxPoolingaD` do not exist in
    # Keras (restored to Conv2D / MaxPooling2D), and every assignment had been
    # collapsed onto a single mangled name while later statements still
    # referenced `classifier`, `train_datagen`, etc.
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    # Load trained model weights
    # from keras.models import load_model
    # regressor = load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # NOTE(review): `result[0][0]` is a sigmoid output (a float in [0, 1]);
    # exact comparison with 0/1 only matches saturated predictions — a
    # threshold comparison is presumably intended. Kept as-is.
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowercase:
    """Builds a small DPR config plus random inputs for the TF-DPR tests
    below.

    NOTE(review): the constructor parameters were mangled (all
    ``_lowerCamelCase``, which duplicated is a SyntaxError) yet the body
    reads the intended names (``parent``, ``batch_size``, ...), and methods
    reference the undefined ``lowerCamelCase__``. Code kept byte-for-byte;
    only documentation added.
    """

    def __init__(self, _lowerCamelCase, _lowerCamelCase=13, _lowerCamelCase=7, _lowerCamelCase=True, _lowerCamelCase=True, _lowerCamelCase=True, _lowerCamelCase=True, _lowerCamelCase=99, _lowerCamelCase=32, _lowerCamelCase=2, _lowerCamelCase=4, _lowerCamelCase=37, _lowerCamelCase="gelu", _lowerCamelCase=0.1, _lowerCamelCase=0.1, _lowerCamelCase=512, _lowerCamelCase=16, _lowerCamelCase=2, _lowerCamelCase=0.0_2, _lowerCamelCase=3, _lowerCamelCase=4, _lowerCamelCase=None, _lowerCamelCase=0,) -> Union[str, Any]:
        """Store the hyper-parameters used to size the toy model/config."""
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope
        __lowercase = projection_dim

    def _UpperCAmelCase(self) -> Tuple:
        """Create random ids/masks/labels plus a DPRConfig wrapping a small
        BertConfig."""
        __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __lowercase = None
        if self.use_input_mask:
            # Follow test_modeling_tf_ctrl.py
            __lowercase = random_attention_mask([self.batch_size, self.seq_length])
        __lowercase = None
        if self.use_token_type_ids:
            __lowercase = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size)
            __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            __lowercase = ids_tensor([self.batch_size], self.num_choices)
        __lowercase = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range,)
        __lowercase = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCAmelCase(self, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase) -> int:
        """Forward the context encoder with/without masks and check the
        pooled-output shape."""
        __lowercase = TFDPRContextEncoder(config=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__, token_type_ids=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def _UpperCAmelCase(self, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase) -> Any:
        """Forward the question encoder and check the pooled-output shape."""
        __lowercase = TFDPRQuestionEncoder(config=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__, token_type_ids=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def _UpperCAmelCase(self, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase) -> str:
        """Forward the reader and check start/end/relevance logits shapes."""
        __lowercase = TFDPRReader(config=lowerCamelCase__)
        __lowercase = model(lowerCamelCase__, attention_mask=lowerCamelCase__)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def _UpperCAmelCase(self) -> str:
        """Return (config, inputs_dict) for the common-test mixin."""
        __lowercase = self.prepare_config_and_inputs()
        (
            __lowercase
        ) = config_and_inputs
        __lowercase = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __lowercase(UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase):
    """Common model tests run for the three TFDPR architectures.

    NOTE(review): the mixin base names (``UpperCAmelCase__``) and the tester
    class name ``TFDPRModelTester`` are unresolved under the mangled names in
    this file; the methods also pass the undefined ``lowerCamelCase__``.
    Code kept byte-for-byte; only documentation added.
    """

    # Model classes exercised by the common tests (empty when TF is absent).
    a : Optional[int] = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    a : List[str] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    # Flags disabling common tests that do not apply to DPR.
    a : int = False
    a : Optional[Any] = False
    a : Union[str, Any] = False
    a : Tuple = False
    a : int = False

    def _UpperCAmelCase(self) -> str:
        """Set up the model tester and the config tester."""
        __lowercase = TFDPRModelTester(self)
        __lowercase = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37)

    def _UpperCAmelCase(self) -> List[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _UpperCAmelCase(self) -> Dict:
        """Forward-pass check for the context encoder."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*lowerCamelCase__)

    def _UpperCAmelCase(self) -> Union[str, Any]:
        """Forward-pass check for the question encoder."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*lowerCamelCase__)

    def _UpperCAmelCase(self) -> Dict:
        """Forward-pass check for the reader."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*lowerCamelCase__)

    @slow
    def _UpperCAmelCase(self) -> Dict:
        """Loading the first published checkpoint of each architecture
        returns a model instance (the context-encoder loop is duplicated in
        the original)."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFDPRContextEncoder.from_pretrained(lowerCamelCase__)
            self.assertIsNotNone(lowerCamelCase__)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFDPRContextEncoder.from_pretrained(lowerCamelCase__)
            self.assertIsNotNone(lowerCamelCase__)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFDPRQuestionEncoder.from_pretrained(lowerCamelCase__)
            self.assertIsNotNone(lowerCamelCase__)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFDPRReader.from_pretrained(lowerCamelCase__)
            self.assertIsNotNone(lowerCamelCase__)
@require_tf
class __lowercase(unittest.TestCase):
    """Integration test: compares a slice of the question-encoder embedding
    against precomputed reference values (downloads a checkpoint; @slow)."""

    @slow
    def _UpperCAmelCase(self) -> str:
        """Encode one tokenized sentence and check the first ten embedding
        values against a reference slice (atol=1e-4)."""
        __lowercase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
        __lowercase = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        __lowercase = model(lowerCamelCase__)[0]  # embedding shape = (1, 768)
        # Compare the actual values for a slice.
        # (Digit underscores below are degradation artifacts but still valid
        # numeric literals, e.g. 0.0_3_2_3_6_2_5_3 == 0.03236253.)
        __lowercase = tf.constant(
            [
                [
                    0.0_3_2_3_6_2_5_3,
                    0.1_2_7_5_3_3_3_5,
                    0.1_6_8_1_8_5_0_9,
                    0.0_0_2_7_9_7_8_6,
                    0.3_8_9_6_9_3_3,
                    0.2_4_2_6_4_9_4_5,
                    0.2_1_7_8_9_7_1,
                    -0.0_2_3_3_5_2_2_7,
                    -0.0_8_4_8_1_9_5_9,
                    -0.1_4_3_2_4_1_1_7,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1E-4))
| 718
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
# Public API of this package.
# Fixed: the export list's name had been mangled to a dead module-level
# constant; restored to `__all__` so `from <pkg> import *` works as intended.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.