"""XLM-RoBERTa-XL configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
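
# Usage sketch (added for illustration; not part of the original module).
# Assuming this file ships inside `transformers`, the config builds offline,
# so shrinking the model for a quick experiment is cheap:
#
#   from transformers import XLMRobertaXLConfig
#
#   config = XLMRobertaXLConfig(num_hidden_layers=2, hidden_size=64)
#   assert config.model_type == "xlm-roberta-xl"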
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through it."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make
        # sure it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = 1.5
__SCREAMING_SNAKE_CASE = int(factor * num_class_images )
__SCREAMING_SNAKE_CASE = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCamelCase_ , aesthetic_weight=0.1 )
os.makedirs(f"{class_data_dir}/images" , exist_ok=UpperCamelCase_ )
if len(list(Path(f"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
return
while True:
__SCREAMING_SNAKE_CASE = client.query(text=UpperCamelCase_ )
if len(UpperCamelCase_ ) >= factor * num_class_images or num_images > 1e4:
break
else:
__SCREAMING_SNAKE_CASE = int(factor * num_images )
__SCREAMING_SNAKE_CASE = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCamelCase_ , aesthetic_weight=0.1 , )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = tqdm(desc="""downloading real regularization images""" , total=UpperCamelCase_ )
with open(f"{class_data_dir}/caption.txt" , """w""" ) as fa, open(f"{class_data_dir}/urls.txt" , """w""" ) as fa, open(
f"{class_data_dir}/images.txt" , """w""" ) as fa:
while total < num_class_images:
__SCREAMING_SNAKE_CASE = class_images[count]
count += 1
try:
__SCREAMING_SNAKE_CASE = requests.get(images["""url"""] )
if img.status_code == 200:
__SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) )
with open(f"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(f"{class_data_dir}/images/{total}.jpg" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser("""""" , add_help=UpperCamelCase_ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCamelCase_ , type=UpperCamelCase_ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCamelCase_ , type=UpperCamelCase_ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCamelCase_ )
return parser.parse_args()
if __name__ == "__main__":
__magic_name__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring"""
import inspect
import unittest
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def snake_case_ ( self):
import diffusers
from diffusers.dependency_versions_table import deps
__SCREAMING_SNAKE_CASE = inspect.getmembers(lowerCAmelCase__ , inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__SCREAMING_SNAKE_CASE = """k-diffusion"""
elif backend == "invisible_watermark":
__SCREAMING_SNAKE_CASE = """invisible-watermark"""
assert backend in deps, f"{backend} is not in the deps table!"
"""Project Euler Problem 187 (https://projecteuler.net/problem=187):
count the semiprimes (products of exactly two, not necessarily distinct,
primes) below a given bound."""

from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes below max_number with a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        # every prime between left and right pairs with prime_numbers[left]
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
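
# Usage sketch (added for illustration): the two-pointer count can be checked
# by hand at a small bound. Below 30 the semiprimes are 4, 6, 9, 10, 14, 15,
# 21, 22, 25 and 26, so:
#
#   assert calculate_prime_numbers(15) == [2, 3, 5, 7, 11, 13]
#   assert solution(30) == 10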
"""Feature extractor class for CLAP."""

import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        # the mel filter banks are large arrays recomputed from the other fields, so don't serialize them
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual
            # max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
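
# Usage sketch (added for illustration; assumes the class is exported as
# `transformers.ClapFeatureExtractor`): featurize one second of silence at
# the default 48 kHz rate. With the default "fusion" truncation the output
# carries an extra axis of 4 stacked mel views per example.
#
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#
#   extractor = ClapFeatureExtractor()
#   features = extractor(np.zeros(48_000), sampling_rate=48_000, return_tensors="np")
#   print(features["input_features"].shape)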
"""Pigeonhole sort: a counting sort for small integer ranges."""


def pigeonhole_sort(a):
    """
    Sort the list in place.

    >>> a = [8, 3, 2, 7, 4, 6, 8]
    >>> b = sorted(a)
    >>> pigeonhole_sort(a)
    >>> a == b
    True
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
"""Build a two-qubit circuit, flip both qubits with X gates, and measure them."""

import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment. Since both
    # qubits are deterministically flipped to |1>, all shots land on '11'.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
"""Tokenization classes for BigBird."""

import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable, so drop it and reload it in __setstate__
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
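
# Usage sketch (added for illustration; downloads the pretrained vocab from
# the Hugging Face Hub, so it needs network access):
#
#   from transformers import BigBirdTokenizer
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("Hello world").input_ids  # [CLS] ... [SEP] around the text
#   print(tokenizer.decode(ids))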
"""ViT MSN model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
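
# Usage sketch (added for illustration; assumes the class is exported as
# `transformers.ViTMSNConfig`): configs build offline, so a tiny variant for
# a smoke test costs nothing. The patch size must divide the image size:
#
#   from transformers import ViTMSNConfig
#
#   config = ViTMSNConfig(num_hidden_layers=2, image_size=32, patch_size=8)
#   assert config.model_type == "vit_msn"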
"""Radix sort for non-negative integers, bucketing by decimal digit."""

from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort the list in place and return it.

    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort(list(range(15))) == sorted(range(15))
    True
    >>> radix_sort(list(range(14, -1, -1))) == sorted(range(15))
    True
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
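
# Usage sketch (added for illustration): the sort mutates the list it
# receives and also returns it, so either style works.
#
#   data = [170, 45, 75, 90, 802, 24, 2, 66]
#   assert radix_sort(data) == sorted([170, 45, 75, 90, 802, 24, 2, 66])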
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: the speed of sound in a fluid is
    v = sqrt(K / rho), where K is the bulk modulus (Pa) and rho the
    density (kg/m^3).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
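
# Usage sketch (added for illustration): for water, a bulk modulus of about
# 2.1e9 Pa and a density of about 1000 kg/m^3 give roughly the textbook
# speed of sound:
#
#   v = speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.1e9)
#   print(f"{v:.0f} m/s")  # ~1449 m/s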
"""Report per-job durations for a GitHub Actions workflow run of huggingface/transformers."""

import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""

    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # the API pages at 100 jobs per request, so walk the remaining pages
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
"""Ideal gas law: PV = nRT, solved for pressure or volume."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
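
# Usage sketch (added for illustration): one mole at 273 K in 0.0224 m^3
# should come out near one atmosphere (~101.3 kPa).
#
#   p = pressure_of_gas_system(moles=1, kelvin=273, volume=0.0224)
#   print(f"{p:.0f} Pa")  # ~101333 Pa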
"""Scrape the citation count of a paper from Google Scholar."""

import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
"""Tests for the Kandinsky 2.2 ControlNet img2img pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_snake_case = logging.getLogger(__name__)
_snake_case = tf.data.AUTOTUNE
def A ( ):
'''simple docstring'''
_lowerCAmelCase : str = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=_lowerCamelCase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=_lowerCamelCase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=_lowerCamelCase , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=_lowerCamelCase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=_lowerCamelCase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=_lowerCamelCase , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=_lowerCamelCase , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=_lowerCamelCase , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=_lowerCamelCase , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=_lowerCamelCase , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=_lowerCamelCase , default=1e-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=_lowerCamelCase , default=1e-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=_lowerCamelCase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=_lowerCamelCase , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=_lowerCamelCase , help="Model ID to upload to on the Hugging Face Hub." )
_lowerCAmelCase : List[Any] = parser.parse_args()
return args
def A ( _lowerCamelCase ):
'''simple docstring'''
try:
if args.tpu_name:
_lowerCAmelCase : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_lowerCAmelCase : Dict = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(_lowerCamelCase )
tf.tpu.experimental.initialize_tpu_system(_lowerCamelCase )
return tpu
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 0
for file in file_list:
_lowerCAmelCase : Optional[int] = file.split("/" )[-1]
_lowerCAmelCase : str = re.search(r"-\d+-(\d+)\.tfrecord" , _lowerCamelCase ).group(1 )
_lowerCAmelCase : Tuple = int(_lowerCamelCase )
num_samples += sample_count
return num_samples
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = count_samples(_lowerCamelCase )
_lowerCAmelCase : Tuple = tf.data.Dataset.from_tensor_slices(_lowerCamelCase )
if shuffle:
_lowerCAmelCase : Union[str, Any] = dataset.shuffle(len(_lowerCamelCase ) )
_lowerCAmelCase : List[Any] = tf.data.TFRecordDataset(_lowerCamelCase , num_parallel_reads=_lowerCamelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
_lowerCAmelCase : Optional[Any] = dataset.apply(tf.data.experimental.assert_cardinality(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = dataset.map(_lowerCamelCase , num_parallel_calls=_lowerCamelCase )
if shuffle:
assert shuffle_buffer_size is not None
_lowerCAmelCase : Any = dataset.shuffle(args.shuffle_buffer_size )
_lowerCAmelCase : Optional[Any] = dataset.batch(_lowerCamelCase , drop_remainder=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = dataset.map(_lowerCamelCase , num_parallel_calls=_lowerCamelCase )
_lowerCAmelCase : List[str] = dataset.prefetch(_lowerCamelCase )
return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size  # assumption: the config's vocabulary is synced to the tokenizer's

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
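# Worked example (numbers chosen for illustration): for a 480x640 input with
# output_size=(384, 384), keep_aspect_ratio=True and multiple=32, scale_height = 0.8 and
# scale_width = 0.6; the height scale deviates less from 1, so both sides use 0.8 and are
# snapped to multiples of 32, giving (384, 512) instead of a distorted (384, 384).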
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        keep_aspect_ratio: Optional[bool] = None,
        ensure_multiple_of: Optional[int] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
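# Hedged usage sketch (the model and image variables are assumed to exist):
#   processor = DPTImageProcessor()
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
# yields one (height, width) tensor of per-pixel class indices per input image.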
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
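# For example, for any task other than multiple-choice the exported ONNX graph declares
# {0: "batch", 1: "sequence"} as the dynamic axes of input_ids, attention_mask and token_type_ids.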
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
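# For instance, with IN_MEMORY_MAX_SIZE = 500 * 2**20 a 400 MiB dataset is considered small
# (eligible for in-memory loading) while a 600 MiB one is not.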
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
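# Worked examples (verified by hand): binary_and(25, 32) returns "0b000000" and
# binary_and(37, 50) returns "0b100000"; both inputs are zero-padded to the longer
# bit-length before the digit-wise AND.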
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
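# Minimal usage sketch: thanks to attribute_map and the two properties above,
#   config = PegasusConfig()
#   assert config.hidden_size == config.d_model == 1024
# holds, so generic code can query hidden_size without knowing the Pegasus-specific name.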
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
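# Each pass through the loop maps one exact solution of the underlying Pell-like recurrence to
# the next, so the answer for min_total = 10**12 is reached in only logarithmically many
# iterations rather than by brute-force search.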
if __name__ == "__main__":
print(F'''{solution() = }''')
def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowercase_ : str = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
lowercase_ : Dict = ap.parse_args()
lowercase_ : List[Any] = Path(args.readme_filepath)
lowercase_ : Any = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
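# Example invocation (paths are hypothetical; the script name assumes the usual Transformers
# conversion-script naming):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json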
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)

    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
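# Example invocation (all values hypothetical), built from the arguments defined above:
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-b \
#       --command "pip install -U transformers" --install_accelerate --debug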
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the last index of `char` in the pattern, or -1 if it is absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
a :Tuple = "ABAABA"
a :List[str] = "AB"
a :Dict = BoyerMooreSearch(text, pattern)
a :Dict = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
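# With the sample inputs above (text "ABAABA", pattern "AB"), the search reports
# matches at positions [0, 3].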
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list, all_annos, idxs, output_size, scale_range, filter_scale: float = 0.0
):
    # NOTE: the quadrant slice assignments below were lost in the damaged original and have been
    # reconstructed from the resize targets; they paste each resized tile into output_img.
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowercase__ ( datasets.BuilderConfig):
UpperCamelCase_ = None
def A ( _lowercase , _lowercase , ):
import pyspark
def generate_fn():
SCREAMING_SNAKE_CASE : str = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
SCREAMING_SNAKE_CASE : Optional[Any] = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' )
SCREAMING_SNAKE_CASE : Tuple = partition_df.collect()
SCREAMING_SNAKE_CASE : str = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowercase__ ( _BaseExamplesIterable):
def __init__( self : List[str] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : int=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = df
SCREAMING_SNAKE_CASE : int = partition_order or range(self.df.rdd.getNumPartitions() )
SCREAMING_SNAKE_CASE : int = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __A ( self : Dict , UpperCamelCase__ : np.random.Generator ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.split_shard_indices_by_worker(UpperCamelCase__ , UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return len(self.partition_order )
class lowercase__ ( datasets.DatasetBuilder):
UpperCamelCase_ = SparkConfig
def __init__( self : int , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
import pyspark
SCREAMING_SNAKE_CASE : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
SCREAMING_SNAKE_CASE : Union[str, Any] = df
SCREAMING_SNAKE_CASE : str = working_dir
super().__init__(
cache_dir=UpperCamelCase__ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase__ , )
def __A ( self : List[Any] ):
'''simple docstring'''
def create_cache_and_write_probe(UpperCamelCase__ : Tuple ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase__ , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def __A ( self : Optional[int] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : Optional[int] , UpperCamelCase__ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __A ( self : str , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(UpperCamelCase__ : Union[str, Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
SCREAMING_SNAKE_CASE : Optional[Any] = self.df.count()
SCREAMING_SNAKE_CASE : Dict = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.df.limit(UpperCamelCase__ )
.repartition(1 )
.mapInArrow(UpperCamelCase__ , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE : Any = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE : Optional[int] = min(UpperCamelCase__ , int(approx_total_size / max_shard_size ) )
SCREAMING_SNAKE_CASE : Tuple = self.df.repartition(UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , ):
'''simple docstring'''
import pyspark
SCREAMING_SNAKE_CASE : int = ParquetWriter if file_format == '''parquet''' else ArrowWriter
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self._working_dir , os.path.basename(UpperCamelCase__ ) ) if self._working_dir else fpath
SCREAMING_SNAKE_CASE : Optional[int] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
SCREAMING_SNAKE_CASE : Dict = self.config.features
SCREAMING_SNAKE_CASE : int = self._writer_batch_size
SCREAMING_SNAKE_CASE : List[Any] = self._fs.storage_options
def write_arrow(UpperCamelCase__ : List[Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.TaskContext().taskAttemptId()
SCREAMING_SNAKE_CASE : Tuple = next(UpperCamelCase__ , UpperCamelCase__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id = 0
writer = writer_class(
features=features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([first_batch] )
writer.write_table(table )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
writer = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([batch] )
writer.write_table(table )
if writer._num_bytes > 0:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath ) ):
dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
shutil.move(file , dest )
stats = (
self.df.mapInArrow(write_arrow , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
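# Aggregation sketch (illustrative numbers): if task 7 finalizes two shards of
# 1_000 and 250 examples, write_arrow yields two (task_id, num_examples,
# num_bytes) rows for it, and the groupBy("task_id") above collapses them into
# one stats row with total_num_examples == 1_250, num_shards == 2 and
# shard_lengths == [1_000, 250]; _prepare_split later uses these counts to
# rename the "TTTTT-SSSSS" shard files into their global "SSSSS-of-NNNNN" names.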
def _prepare_split( self , split_generator: "datasets.SplitGenerator" , file_format: str = "arrow" , max_shard_size: Optional[Union[str, int]] = None , num_proc: Optional[int] = None , **kwargs , ):
'''simple docstring'''
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(max_shard_size )
is_local = not is_remote_filesystem(self._fs )
path_join = os.path.join if is_local else posixpath.join
SUFFIX = '''-TTTTT-SSSSS-of-NNNNN'''
fname = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
fpath = path_join(self._output_dir , fname )
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
(num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id: int , shard_id: int , global_shard_id: int , ):
rename(
fs , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards ) ):
task_id, num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args: _rename_shard(*args ) ).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(SUFFIX , '''''' ) , )
def __A ( self : List[Any] , UpperCamelCase__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """Rectified linear unit: element-wise max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 258
| 0
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine if a string can be rearranged as a palindrome using a Counter."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine if a string can be rearranged as a palindrome with an explicit frequency dict."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
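# A quick worked check of the odd-count criterion both implementations rely on
# (a sketch added for illustration): Counter("racecar") gives
# {'r': 2, 'a': 2, 'c': 2, 'e': 1}, a single odd count, so "racecar" qualifies,
# while "abc" has three odd counts and does not.
assert can_string_be_rearranged_as_palindrome_counter("racecar")
assert not can_string_be_rearranged_as_palindrome_counter("abc")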
def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =",
        can_string_be_rearranged_as_palindrome(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 184
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 270
| 0
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( A_ ):
lowercase__ = ['''input_features''', '''is_longer''']
def __init__( self : str , snake_case_ : Any=64 , snake_case_ : Tuple=48_000 , snake_case_ : Optional[Any]=480 , snake_case_ : Any=10 , snake_case_ : Tuple=1_024 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=False , snake_case_ : float = 0 , snake_case_ : float = 14_000 , snake_case_ : int = None , snake_case_ : str = "fusion" , snake_case_ : str = "repeatpad" , **snake_case_ : List[str] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
A__ = top_db
A__ = truncation
A__ = padding
A__ = fft_window_size
A__ = (fft_window_size >> 1) + 1
A__ = hop_length
A__ = max_length_s
A__ = max_length_s * sampling_rate
A__ = sampling_rate
A__ = frequency_min
A__ = frequency_max
A__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm=snake_case_ , mel_scale="htk" , )
A__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm="slaney" , mel_scale="slaney" , )
def __magic_name__ ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__ )
A__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __magic_name__ ( self : List[Any] , snake_case_ : np.array , snake_case_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
A__ = spectrogram(
snake_case_ , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case_ , log_mel="dB" , )
return log_mel_spectrogram.T
def __magic_name__ ( self : Tuple , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ) -> Any:
'''simple docstring'''
A__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A__ = [0]
# randomly choose index for each part
A__ = np.random.choice(ranges[0] )
A__ = np.random.choice(ranges[1] )
A__ = np.random.choice(ranges[2] )
A__ = mel[idx_front : idx_front + chunk_frames, :]
A__ = mel[idx_middle : idx_middle + chunk_frames, :]
A__ = mel[idx_back : idx_back + chunk_frames, :]
A__ = torch.tensor(mel[None, None, :] )
A__ = torch.nn.functional.interpolate(
snake_case_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=snake_case_ )
A__ = mel_shrink[0][0].numpy()
A__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
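# Shape sketch for the fusion above (an illustration, not part of the public
# API): given a mel of shape (total_frames, 64) and a chunk length C, the
# stacked result combines one bilinearly shrunk global view with three random
# crops, i.e. np.stack([shrunk, front, middle, back], axis=0) -> (4, C, 64).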
def __magic_name__ ( self : Optional[Any] , snake_case_ : np.array , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Dict ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A__ = len(snake_case_ ) - max_length
A__ = np.random.randint(0 , overflow + 1 )
A__ = waveform[idx : idx + max_length]
A__ = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A__ = self._np_extract_fbank_features(snake_case_ , self.mel_filters )
A__ = max_length // self.hop_length + 1 # the +1 comes from how the spectrogram is computed
A__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A__ = np.stack([mel, mel, mel, mel] , axis=0 )
A__ = False
else:
A__ = self._random_mel_fusion(snake_case_ , snake_case_ , snake_case_ )
A__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
A__ = False
# "repeat" and "repeatpad" tile the audio before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
A__ = int(max_length / len(snake_case_ ) )
A__ = np.stack(np.tile(snake_case_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A__ = int(max_length / len(snake_case_ ) )
A__ = np.stack(np.tile(snake_case_ , snake_case_ ) )
A__ = np.pad(snake_case_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
A__ = self._np_extract_fbank_features(snake_case_ , self.mel_filters )
A__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
A__ = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
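# Padding example (illustrative numbers): with max_length = 10 and a 4-sample
# waveform, "repeat" tiles 3 copies (12 samples) and truncates to 10, while
# "repeatpad" tiles int(10 / 4) == 2 copies (8 samples) and then zero-pads the
# remaining samples up to max_length.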
def __call__( self : str , snake_case_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case_ : str = None , snake_case_ : Optional[str] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : Union[str, Any] , ) -> BatchFeature:
'''simple docstring'''
A__ = truncation if truncation is not None else self.truncation
A__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
A__ = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
A__ = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(snake_case_ , dtype=np.float64 ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
A__ = np.asarray(snake_case_ , dtype=np.float64 )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
A__ = raw_speech.astype(np.float64 )
# always return batch
if not is_batched:
A__ = [np.asarray(snake_case_ )]
# convert to mel spectrogram, truncate and pad if needed.
A__ = [
self._get_input_mel(snake_case_ , max_length if max_length else self.nb_max_samples , snake_case_ , snake_case_ )
for waveform in raw_speech
]
A__ = []
A__ = []
for mel, longer in padded_inputs:
input_mel.append(snake_case_ )
is_longer.append(snake_case_ )
if truncation == "fusion" and sum(snake_case_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A__ = np.random.randint(0 , len(snake_case_ ) )
A__ = True
if isinstance(input_mel[0] , snake_case_ ):
A__ = [np.asarray(snake_case_ , dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
A__ = [[longer] for longer in is_longer]
A__ = {"input_features": input_mel, "is_longer": is_longer}
A__ = BatchFeature(snake_case_ )
if return_tensors is not None:
A__ = input_features.convert_to_tensors(snake_case_ )
return input_features
| 230
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = model.config
A__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
A__ = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
if "encoder.model" in name:
A__ = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
A__ = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
A__ = "encoder." + name
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A__ = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
A__ = "encoder.layernorm.bias"
return name
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[3] )
A__ = int(key_split[5] )
A__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A__ = val
return orig_state_dict
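# Splitting sketch for the "qkv" branch above (illustrative shapes): a fused
# qkv weight of shape (3 * dim, in_features) is cut row-wise into
# query = val[:dim, :], key = val[dim : dim * 2, :] and value = val[-dim:, :];
# the matching (3 * dim,) bias vector is split the same way.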
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False ) -> Dict:
# load original model
A__ = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
A__, A__ = get_configs(lowercase_ )
A__ = DonutSwinModel(lowercase_ )
A__ = MBartForCausalLM(lowercase_ )
A__ = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
A__ = original_model.state_dict()
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
A__ = load_dataset("hf-internal-testing/example-documents" )
A__ = dataset["test"][0]["image"].convert("RGB" )
A__ = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
A__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A__ = DonutProcessor(lowercase_ , lowercase_ )
A__ = processor(lowercase_ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A__ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
A__ = "When is the coffee break?"
A__ = task_prompt.replace("{user_input}" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A__ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A__ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A__ = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A__ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A__ = "hello world"
else:
raise ValueError("Model name not supported" )
A__ = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="pt" )[
"input_ids"
]
A__ = original_model.encoder.model.patch_embed(lowercase_ )
A__, A__ = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
A__ = original_model.encoder(lowercase_ )
A__ = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
A__ = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
A__ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 230
| 1
|
"""simple docstring"""
from PIL import Image
def change_contrast(img, level):
    """Change the contrast of a PIL image around the midpoint 128."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
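# Worked example of the factor above (an illustration, not part of the original
# demo): for level = 170, factor = (259 * 425) / (255 * 89) ~= 4.85, so a pixel
# value of 140 maps to int(128 + 4.85 * 12) == 186, stretching values away from
# the midpoint 128; Image.point applies this per pixel and clamps to 0-255.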
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 78
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78
| 1
|
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power S = V * I for phasor voltage and current."""
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
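# Worked example (a sketch): apparent_power(100, 5, 0, -30) multiplies the
# phasors 100 V at 0 degrees and 5 A at -30 degrees, giving S = 500 VA at
# -30 degrees, i.e. roughly 433.0 - 250.0j: about 433 W of real power and
# 250 var of reactive power.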
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 259
|
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of a string using the rail fence (zigzag) pattern."""
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Recreate the zigzag grid and read the characters back in order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def brute_force(input_string: str) -> dict:
    """Return a dict of every possible decryption, keyed by the guessed key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
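# Classic worked example (a sketch for illustration): a 3-rail zigzag of
# "WEAREDISCOVEREDFLEEATONCE" reads off row by row as
# "WECRLTEERDSOEEFEAOCAIVDEN", and decrypting with the same key inverts it.
assert encrypt("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"
assert decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3) == "WEAREDISCOVEREDFLEEATONCE"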
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 259
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = '''ZinengTang/tvlt-base'''
lowerCAmelCase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return TvltImageProcessor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones([12000] )
lowerCAmelCase = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowerCAmelCase = processor(audio=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones([3, 224, 224] )
lowerCAmelCase = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowerCAmelCase = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones([12000] )
lowerCAmelCase = np.ones([3, 224, 224] )
lowerCAmelCase = processor(audio=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 338
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase : Union[str, Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
'''simple docstring'''
model, model_cfg = create_model(
'''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = {}
A : str = R'''.*sequential.(\d+).*'''
A : Union[str, Any] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A : Any = key.replace(snake_case__ , snake_case__ )
if re.match(snake_case__ , snake_case__ ):
# replace sequential layers with list
A : Any = re.match(snake_case__ , snake_case__ ).group(1 )
A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' )
elif re.match(snake_case__ , snake_case__ ):
A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A : str = 1 if projecton_layer == 0 else 2
A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
A : int = value
A : List[Any] = mixed_qkv.size(0 ) // 3
A : Union[str, Any] = mixed_qkv[:qkv_dim]
A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
A : Optional[int] = mixed_qkv[qkv_dim * 2 :]
A : Tuple = query_layer
A : Union[str, Any] = key_layer
A : Optional[int] = value_layer
else:
A : Dict = value
return model_state_dict
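# Renaming sketch (illustrative key fragments): a checkpoint key containing
# "sequential.3." is rewritten to "layers.1.linear." because 3 // 3 == 1,
# while a key containing "_projection.2." becomes "_projection.linear2."
# (projection index 0 maps to linear1, anything else to linear2, mirroring
# the two Linear modules inside CLAP's nn.Sequential projection head).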
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
clap_model, model_cfg = init_clap(snake_case__ , enable_fusion=snake_case__ )
clap_model.eval()
A : str = clap_model.state_dict()
A : Union[str, Any] = rename_state_dict(snake_case__ )
A : Tuple = ClapConfig()
A : str = enable_fusion
A : str = ClapModel(snake_case__ )
# ignore the spectrogram embedding layer
model.load_state_dict(snake_case__ , strict=snake_case__ )
model.save_pretrained(snake_case__ )
transformers_config.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowercase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 3
| 0
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
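# Worked check (illustration): 145 is one of the numbers the search finds,
# since 1! + 4! + 5! == 1 + 24 + 120 == 145; the 7 * 9! + 1 bound holds because
# a number with eight or more digits always exceeds the largest possible sum
# of its digit factorials.
assert sum_of_digit_factorial(145) == 145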
if __name__ == "__main__":
    print(f"{solution() = }")
| 367
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase_ : Any = 5_00_00
lowerCamelCase_ : Any = 50_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
lowerCamelCase_ : int = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
for i in range(lowerCamelCase ):
UpperCamelCase_: Dict = dataset[i]
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
for i in range(0 , len(lowerCamelCase ) , lowerCamelCase ):
UpperCamelCase_: List[Any] = dataset[i : i + batch_size]
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
with dataset.formatted_as(type=lowerCamelCase ):
for i in range(lowerCamelCase ):
UpperCamelCase_: List[str] = dataset[i]
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
with dataset.formatted_as(type=lowerCamelCase ):
for i in range(0 , lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = dataset[i : i + batch_size]
def A__ ( ) -> Tuple:
UpperCamelCase_: int = {"""num examples""": SPEED_TEST_N_EXAMPLES}
UpperCamelCase_: Union[str, Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
UpperCamelCase_: Tuple = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
UpperCamelCase_: int = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
UpperCamelCase_: Optional[int] = generate_example_dataset(
os.path.join(lowerCamelCase , """dataset.arrow""" ) , lowerCamelCase , num_examples=lowerCamelCase , seq_shapes={"""list""": (1_00,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCamelCase ) )
UpperCamelCase_: List[Any] = func(lowerCamelCase , **lowerCamelCase )
print("""shuffling dataset""" )
UpperCamelCase_: Dict = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(lowerCamelCase ) )
UpperCamelCase_: List[Any] = func(
lowerCamelCase , **lowerCamelCase )
with open(lowerCamelCase , """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 223
| 0
|
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
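# Quick sanity check of the recurrence (a sketch; the constants below are the
# classic Numerical Recipes parameters also used in the demo that follows):
# starting from seed = 0, the first output is
# (1_664_525 * 0 + 1_013_904_223) % (2 << 31) == 1_013_904_223, and every later
# value is fully determined by the previous one.
assert LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=0).next_number() == 1_013_904_223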
if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
| 119
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Any=0.999 , snake_case__ : List[Any]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : int ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase : List[Any] = []
for i in range(snake_case__ ):
UpperCamelCase : Optional[Any] = i / num_diffusion_timesteps
UpperCamelCase : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.float32 )
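# Minimal standalone sketch of the cosine schedule above (hypothetical helper
# names; numbers are illustrative):
# def _alpha_bar(t):
#     return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
# betas = [min(1 - _alpha_bar((i + 1) / 10) / _alpha_bar(i / 10), 0.999) for i in range(10)]
# The ratio of consecutive _alpha_bar values shrinks as t approaches 1, so the
# betas grow toward the 0.999 cap, which is the intent of the
# "squaredcos_cap_v2" (Glide cosine) schedule.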
class lowerCAmelCase_ ( a__ , a__ ):
UpperCAmelCase__ : List[Any] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : List[str] = 2
@register_to_config
def __init__( self, SCREAMING_SNAKE_CASE_ = 1000, SCREAMING_SNAKE_CASE_ = 0.0_00_85, SCREAMING_SNAKE_CASE_ = 0.0_12, SCREAMING_SNAKE_CASE_ = "linear", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "epsilon", SCREAMING_SNAKE_CASE_ = "linspace", SCREAMING_SNAKE_CASE_ = 0, ) -> List[str]:
if trained_betas is not None:
UpperCamelCase : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE_, dtype=torch.float32 )
elif beta_schedule == "linear":
UpperCamelCase : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase : int = (
torch.linspace(beta_start**0.5, beta_end**0.5, SCREAMING_SNAKE_CASE_, dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase : Optional[int] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCamelCase : Optional[int] = 1.0 - self.betas
UpperCamelCase : int = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> str:
if schedule_timesteps is None:
UpperCamelCase : Union[str, Any] = self.timesteps
UpperCamelCase : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase : Optional[int] = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
UpperCamelCase : str = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
UpperCamelCase : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
def snake_case_ ( self ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> torch.FloatTensor:
UpperCamelCase : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
if self.state_in_first_order:
UpperCamelCase : Dict = self.sigmas[step_index]
else:
UpperCamelCase : int = self.sigmas_interpol[step_index]
UpperCamelCase : Any = sample / ((sigma**2 + 1) ** 0.5)
return sample
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, ) -> Optional[int]:
UpperCamelCase : Dict = num_inference_steps
UpperCamelCase : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase : int = np.linspace(0, num_train_timesteps - 1, SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase : Optional[Any] = (np.arange(0, SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase : Optional[int] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase : Any = (np.arange(SCREAMING_SNAKE_CASE_, 0, -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCamelCase : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = np.interp(SCREAMING_SNAKE_CASE_, np.arange(0, len(SCREAMING_SNAKE_CASE_ ) ), SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
UpperCamelCase : Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
# interpolate sigmas
UpperCamelCase : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp()
UpperCamelCase : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase : Optional[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
# mps does not support float64
UpperCamelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_, dtype=torch.float32 )
else:
UpperCamelCase : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
# interpolate timesteps
UpperCamelCase : int = self.sigma_to_t(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_, dtype=timesteps.dtype )
UpperCamelCase : List[str] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten()
UpperCamelCase : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps] )
UpperCamelCase : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase : Dict = defaultdict(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
# get log sigma
UpperCamelCase : List[Any] = sigma.log()
# get distribution
UpperCamelCase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
UpperCamelCase : Optional[int] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
UpperCamelCase : Tuple = low_idx + 1
UpperCamelCase : List[str] = self.log_sigmas[low_idx]
UpperCamelCase : Optional[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase : int = (low - log_sigma) / (low - high)
UpperCamelCase : Tuple = w.clamp(0, 1 )
# transform interpolation to time range
UpperCamelCase : List[str] = (1 - w) * low_idx + w * high_idx
UpperCamelCase : Dict = t.view(sigma.shape )
return t
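# Interpolation sketch for sigma_to_t above (illustrative values): when
# log(sigma) falls between self.log_sigmas[low_idx] and
# self.log_sigmas[high_idx], the weight w = (low - log_sigma) / (low - high)
# is clamped to [0, 1] and the fractional timestep is
# (1 - w) * low_idx + w * high_idx, a linear blend of the two neighbouring
# discrete timesteps in log-sigma space.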
@property
def snake_case_ ( self ) -> Optional[int]:
return self.sample is None
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True, ) -> Union[SchedulerOutput, Tuple]:
UpperCamelCase : str = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
UpperCamelCase : Optional[int] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase : Tuple = self.sigmas[step_index]
UpperCamelCase : Dict = self.sigmas_interpol[step_index + 1]
UpperCamelCase : Optional[int] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
UpperCamelCase : str = self.sigmas[step_index - 1]
UpperCamelCase : Dict = self.sigmas_interpol[step_index]
UpperCamelCase : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase : Dict = 0
UpperCamelCase : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase : Any = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase : List[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
UpperCamelCase : Tuple = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
UpperCamelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
UpperCamelCase : Dict = sigma_next - sigma_hat
UpperCamelCase : Any = self.sample
UpperCamelCase : str = None
UpperCamelCase : Any = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCamelCase : Optional[Any] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
UpperCamelCase : List[str] = self.timesteps.to(original_samples.device, dtype=torch.float32 )
UpperCamelCase : str = timesteps.to(original_samples.device, dtype=torch.float32 )
else:
UpperCamelCase : Dict = self.timesteps.to(original_samples.device )
UpperCamelCase : int = timesteps.to(original_samples.device )
UpperCamelCase : str = [self.index_for_timestep(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for t in timesteps]
UpperCamelCase : List[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase : int = sigma.unsqueeze(-1 )
UpperCamelCase : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Optional[int]:
return self.config.num_train_timesteps
| 119
| 1
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : List[str] , UpperCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Optional[int]:
super().__init__()
lowerCamelCase__ : int = nn.ModuleList(UpperCAmelCase )
def A_ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : torch.Tensor , UpperCAmelCase : List[torch.tensor] , UpperCAmelCase : List[float] , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[Dict[str, Any]] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(UpperCAmelCase , UpperCAmelCase , self.nets ) ):
down_samples, mid_sample = controlnet(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )
# merge samples
if i == 0:
down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(UpperCAmelCase , UpperCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A_ ( self : Union[str, Any] , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = True , UpperCAmelCase : Callable = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[str] = None , ) -> List[str]:
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : int = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
UpperCAmelCase , is_main_process=UpperCAmelCase , save_function=UpperCAmelCase , safe_serialization=UpperCAmelCase , variant=UpperCAmelCase , )
idx += 1
lowerCamelCase__ : List[Any] = save_directory + F"""_{idx}"""
@classmethod
def A_ ( cls : List[str] , UpperCAmelCase : Optional[Union[str, os.PathLike]] , **UpperCAmelCase : Tuple ) -> Optional[Any]:
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : str = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowerCamelCase__ : str = pretrained_model_path
while os.path.isdir(UpperCAmelCase ):
lowerCamelCase__ : List[Any] = ControlNetModel.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
controlnets.append(UpperCAmelCase )
idx += 1
lowerCamelCase__ : Optional[Any] = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.""" )
if len(UpperCAmelCase ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(UpperCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(UpperCAmelCase )
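# Directory layout sketch (hypothetical paths): saving three nets with
# save_pretrained("./multi") writes them to "./multi", "./multi_1" and
# "./multi_2", and from_pretrained("./multi") walks the same "_<idx>" suffix
# sequence until a directory is missing, so both directions follow the naming
# scheme described in the comments above.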
| 363
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root can only be bracketed between a and b if f(a) and f(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
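# Note (added for illustration): with f(x) = 10 - x**2 both demo calls bracket the
# positive root sqrt(10) ~= 3.1623, so each printed value agrees with that to within
# the 0.01 interval tolerance used above.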
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    """Fast Bloom tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
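# --- usage sketch (added for illustration; assumes the bigscience/bloom-560m checkpoint) ---
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    enc = tok("Hello world")
    print(enc["input_ids"], tok.decode(enc["input_ids"]))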
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
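# --- usage sketch (added for illustration; mirrors the integration test above) ---
# Assumes an internet connection and the google/vit-hybrid-base-bit-384 checkpoint.
if __name__ == "__main__":
    processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    classifier = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    pixel_inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        predicted = classifier(**pixel_inputs).logits.argmax(-1).item()
    print(classifier.config.id2label[predicted])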
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCAmelCase = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
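# --- usage sketch (added for illustration; requires flax to be installed) ---
# check_models_equal compares two Flax models parameter-by-parameter; a
# save/load round-trip should compare equal:
if __name__ == "__main__":
    cfg = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
    m = FlaxBertModel(cfg)
    with tempfile.TemporaryDirectory() as d:
        m.save_pretrained(d)
        assert check_models_equal(m, FlaxBertModel.from_pretrained(d))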
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs, ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, **kwargs, )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
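# --- usage sketch (added for illustration) ---
if __name__ == "__main__":
    cfg = Mask2FormerConfig()  # with no backbone_config, a default Swin backbone is used
    serialized = cfg.to_dict()  # the nested backbone config is serialized recursively
    print(serialized["model_type"], serialized["backbone_config"]["model_type"])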
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
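# --- usage sketch (added for illustration; mirrors the integration tests above) ---
# Assumes a CUDA GPU and the stabilityai/stable-diffusion-x4-upscaler checkpoint.
if __name__ == "__main__":
    upscaler = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16).to("cuda")
    cat = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png")
    upscaled = upscaler(prompt="a cat sitting on a park bench", image=cat).images[0]
    upscaled.save("upsampled_cat.png")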
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
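# --- note (added for illustration) ---
# The `_LazyModule` assignment above swaps this package's entry in `sys.modules`,
# so the heavy `modeling_mega` submodule is only imported on first attribute access:
#
#     from transformers.models.mega import MegaConfig   # cheap: config only
#     from transformers.models.mega import MegaModel    # triggers the torch-backed import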
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier free sampling
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that, per column vector, the kept probabilities sum to at most
        `truncation_rate`; the lower probabilities that would push the cumulative sum above the
        rate are zeroed out (set to log(0), i.e. -inf).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
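# --- usage sketch (added for illustration; assumes the microsoft/vq-diffusion-ithq checkpoint) ---
if __name__ == "__main__":
    pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    result = pipeline(prompt="teddy bear playing in the pool", truncation_rate=0.86)
    result.images[0].save("teddy_bear.png")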
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
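# --- usage sketch (added for illustration) ---
# Once uploaded, the tiny checkpoint can be pulled back down like any other model:
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#     tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")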
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda v: version.Version(v))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE is in sys.path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it as a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
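# A minimal usage sketch for the machinery above (commented out because this
# module uses relative imports and cannot run standalone). The pipeline name
# "clip_guided_stable_diffusion" is an assumption; any file under
# examples/community/ in the diffusers GitHub repo follows the same pattern.
#
# pipeline_cls = get_class_from_dynamic_module(
#     "clip_guided_stable_diffusion",             # no "/" -> resolved on GitHub
#     module_file="clip_guided_stable_diffusion.py",
#     class_name=None,                            # None -> find_pipeline_class() auto-detects
# )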
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            # No target pytree is available here, so deserialize without a template.
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
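# A short, hedged sketch of how these helpers are typically driven. The model
# class and checkpoint file name below are illustrative assumptions, not part
# of this module; the config of `pt_model` must match the Flax checkpoint.
#
# from diffusers import UNet2DModel
# pt_model = UNet2DModel.from_config("path/to/config")
# pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "diffusion_flax_model.msgpack")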
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def A_ ( _lowerCAmelCase : Callable, _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float ):
"""simple docstring"""
_a = int(np.ceil((x_end - xa) / step_size ) )
_a = np.zeros((n + 1,) )
_a = ya
_a = xa
for k in range(_lowerCAmelCase ):
_a = y[k] + step_size * ode_func(_lowerCAmelCase, y[k] )
_a = y[k] + (
(step_size / 2) * (ode_func(_lowerCAmelCase, y[k] ) + ode_func(x + step_size, _lowerCAmelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
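# Quick, illustrative convergence check for euler_modified() above: Heun's
# method has global error O(step_size**2), so halving the step should cut the
# error roughly by 4. Uses only names defined in this file (np, euler_modified).
def _demo_convergence() -> None:
    exact = np.exp(1.0)
    for h in (0.1, 0.05):
        approx = euler_modified(lambda x, y: y, 1.0, 0.0, h, 1.0)[-1]
        print(f"h={h}: abs error = {abs(approx - exact):.2e}")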
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs) -> RobertaTokenizer:
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> RobertaTokenizerFast:
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
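# Illustrative note on the "\u0120" ("Ġ") entries used in the toy vocab above:
# byte-level BPE remaps the space byte 0x20 to a printable character, which is
# why word-initial tokens carry a "Ġ" prefix. A self-contained sketch of the
# GPT-2/RoBERTa byte-encoder offset (0x100 for the first excluded byte range):
def show_space_remap() -> None:
    # space is byte 0x20; the byte encoder shifts it into the printable plane
    print(chr(0x20 + 0x100))  # -> "Ġ" (U+0120)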
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        tokens_to_keep = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, tokens_to_keep)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(tokens_to_keep))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, tokens_to_keep)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
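# Illustrative cross-check of the hard-coded ids in the tests above: ByT5 ids
# are raw UTF-8 bytes shifted by the 3 special tokens (pad=0, eos=1, unk=2),
# e.g. "U" (byte 85) -> id 88, matching test_multibytes_char.
def byt5_ids(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")] + [1]  # trailing </s>

assert byt5_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]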
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
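# A short, hedged usage sketch for the extractor above (commented out to keep
# this module import-safe). The 1-second 440 Hz sine wave is synthetic; the
# class name matches the definition in this file.
#
# import numpy as np
# fe = TvltFeatureExtractor()
# wave = np.sin(2 * np.pi * 440 * np.arange(44100) / 44100).astype(np.float32)
# out = fe(wave, sampling_rate=44100, return_tensors="np")
# print(out["audio_values"].shape, out["audio_mask"].shape)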
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def nand_gate(input_1: int, input_2: int) -> int:
    """
    Output of a NAND logic gate: 0 only when both inputs are 1.

    >>> nand_gate(0, 0)
    1
    >>> nand_gate(1, 1)
    0
    """
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
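# Aside (editor's sketch, not part of the original file): typical use of the
# fast tokenizer defined above; the checkpoint name is one of the keys in
# PRETRAINED_VOCAB_FILES_MAP and loading it requires Hub access.
# tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
# ids = tokenizer("hello world")["input_ids"]   # [CLS] hello world [SEP]
# assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id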
| 349
| 0
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Build the XLNet head matching `finetuning_task`, load the TF weights, and save a PyTorch checkpoint."""
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
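# Aside (editor's sketch): an illustrative invocation; the script filename and
# checkpoint/config paths below are placeholders, not taken from this file.
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pytorch \
#     --finetuning_task sst-2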
| 355
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration for a Transformer-XL (`transfo-xl`) model."""
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs, ) -> None:
        """Store the Transformer-XL hyper-parameters on the config instance."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        """Transformer-XL has no fixed sequence length limit, signalled by -1."""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 208
| 0
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """Check that the installed version of `pkg` satisfies its pin in `deps`."""
    require_version(deps[pkg], hint)
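# Aside (editor's sketch): typical call sites pass a package key from the deps
# table, optionally with a hint shown when the version pin is not satisfied.
# dep_version_check("tqdm")
# dep_version_check("tokenizers", hint="pip install -U tokenizers")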
| 306
|
"""simple docstring"""
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole-valued base-10 number (int or float) to a hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
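# Aside (editor's check): a few expected values for the function above.
assert decimal_to_hexadecimal(255) == "0xff"
assert decimal_to_hexadecimal(-256) == "-0x100"
assert decimal_to_hexadecimal(17.0) == "0x11"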
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
_SCREAMING_SNAKE_CASE : Optional[Union[str, Path]] = None
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : Optional[Dict] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : int = 1
_SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = None
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : Optional[Dict] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
def lowerCAmelCase (self : Union[str, Any] ):
return self.__class__(**{k: copy.deepcopy(_A ) for k, v in self.__dict__.items()} )
| 356
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self):
        return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
__a : Optional[Any] = state.common.alphas_cumprod[t]
__a : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__a : Optional[int] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__a : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__a : Optional[Any] = jnp.clip(snake_case_ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__a : int = jnp.log(jnp.clip(snake_case_ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
__a : List[str] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__a : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__a : Any = variance
__a : Dict = state.common.betas[t]
__a : Any = (predicted_variance + 1) / 2
__a : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True, ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
__a : int = timestep
if key is None:
__a : Any = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__a , __a : List[str] = jnp.split(snake_case_ , sample.shape[1] , axis=1 )
else:
__a : int = None
# 1. compute alphas, betas
__a : Optional[int] = state.common.alphas_cumprod[t]
__a : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__a : Optional[int] = 1 - alpha_prod_t
__a : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__a : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__a : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
__a : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__a : Dict = jnp.clip(snake_case_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a : str = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__a : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__a : Optional[int] = jax.random.split(snake_case_ , num=1 )
__a : Union[str, Any] = jax.random.normal(snake_case_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case_ , snake_case_ , predicted_variance=snake_case_ ) ** 0.5) * noise
__a : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__a : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case_ , state=snake_case_ )
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__(self : List[str] ):
return self.config.num_train_timesteps
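# Aside (editor's sketch, not part of the original file): a self-contained numpy
# check of the DDPM posterior that the scheduler above implements, i.e. formulas
# (6)/(7) of https://arxiv.org/pdf/2006.11239.pdf; all names are illustrative.
import numpy as np

_betas = np.linspace(1e-4, 0.02, 1000)            # "linear" beta schedule
_alphas_cumprod = np.cumprod(1.0 - _betas)
_t = 500
_alpha_prod_t = _alphas_cumprod[_t]
_alpha_prod_t_prev = _alphas_cumprod[_t - 1]
_beta_prod_t = 1.0 - _alpha_prod_t
# posterior mean coefficients for x_0 and x_t (formula (7))
_coef_x0 = (_alpha_prod_t_prev ** 0.5 * _betas[_t]) / _beta_prod_t
_coef_xt = ((1.0 - _betas[_t]) ** 0.5) * (1.0 - _alpha_prod_t_prev) / _beta_prod_t
# posterior variance (formula (6); the "fixed_small" branch above)
_variance = (1.0 - _alpha_prod_t_prev) / (1.0 - _alpha_prod_t) * _betas[_t]
assert 0.0 < _variance < _betas[_t]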
| 90
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration for a BEiT model (`beit`)."""
    model_type = "beit"
    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT."""
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
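# Aside (editor's sketch): the ONNX contract above declares a single
# "pixel_values" input of shape (batch, num_channels, height, width) and
# validates exported outputs within atol_for_validation, e.g.:
# onnx_config = BeitOnnxConfig(BeitConfig())
# list(onnx_config.inputs.keys())   # ["pixel_values"]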
| 68
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
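# Aside (editor's self-contained sketch of the lazy-import pattern above; the
# real implementation lives in transformers.utils._LazyModule and this toy
# version is illustrative only):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        # triggered only on first access: import the submodule lazily, then delegate
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)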
| 45
| 0
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame."""
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order, ):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f"part_id = {partition_id}").drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
# Returns the path of the created file.
def create_cache_and_write_probe(__snake_case : Tuple ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
            open(probe_file, "a")
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info(self):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed(self, max_shard_size):
import pyspark
def get_arrow_batch_size(__snake_case : List[str] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
a : List[Any] = self.df.count()
a : Any = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
a : str = (
self.df.limit(__snake_case )
.repartition(1 )
.mapInArrow(__snake_case , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
a : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
a : Optional[int] = min(__snake_case , int(approx_total_size / max_shard_size ) )
a : Dict = self.df.repartition(__snake_case )
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int, ):
import pyspark
a : Optional[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter
a : Any = os.path.join(self._working_dir , os.path.basename(__snake_case ) ) if self._working_dir else fpath
a : Union[str, Any] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
a : Optional[Any] = self.config.features
a : Optional[Any] = self._writer_batch_size
a : List[str] = self._fs.storage_options
def write_arrow(__snake_case : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
a : Any = pyspark.TaskContext().taskAttemptId()
a : int = next(__snake_case , __snake_case )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
a : Tuple = 0
a : Optional[Any] = writer_class(
features=__snake_case , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=__snake_case , storage_options=__snake_case , embed_local_files=__snake_case , )
a : int = pa.Table.from_batches([first_batch] )
writer.write_table(__snake_case )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
a , a : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
a : Dict = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=__snake_case , storage_options=__snake_case , embed_local_files=__snake_case , )
a : Dict = pa.Table.from_batches([batch] )
writer.write_table(__snake_case )
if writer._num_bytes > 0:
a , a : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__snake_case ) ):
a : List[str] = os.path.join(os.path.dirname(__snake_case ) , os.path.basename(__snake_case ) )
shutil.move(__snake_case , __snake_case )
a : Dict = (
self.df.mapInArrow(__snake_case , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowercase_ ( self : Dict , __snake_case : "datasets.SplitGenerator" , __snake_case : str = "arrow" , __snake_case : Optional[Union[str, int]] = None , __snake_case : Optional[int] = None , **__snake_case : Tuple , ):
self._validate_cache_dir()
a : Optional[int] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__snake_case )
a : Union[str, Any] = not is_remote_filesystem(self._fs )
a : List[Any] = os.path.join if is_local else posixpath.join
a : Any = '-TTTTT-SSSSS-of-NNNNN'
a : List[str] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
a : int = path_join(self._output_dir , __snake_case )
a : List[str] = 0
a : int = 0
a : Any = 0
a : Tuple = []
a : Any = []
for task_id, content in self._prepare_split_single(__snake_case , __snake_case , __snake_case ):
(
(
a
) , (
a
) , (
a
) , (
a
) ,
) : Any = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__snake_case )
a : Union[str, Any] = total_num_examples
a : Dict = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
a : str = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
a : List[str] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__snake_case : int , __snake_case : int , __snake_case : int , ):
rename(
__snake_case , fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , F"""{global_shard_id:05d}""" ).replace('NNNNN' , F"""{total_shards:05d}""" ) , )
a : Tuple = []
a : str = 0
for i in range(len(__snake_case ) ):
a , a : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(__snake_case ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__snake_case , len(__snake_case ) ).map(lambda __snake_case : _rename_shard(*__snake_case ) ).collect()
else:
# don't use any pattern
a : Any = 0
a : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace(__snake_case , '' ) , )
def lowercase_ ( self : str , __snake_case : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
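# Aside (editor's sketch): the builder above backs `datasets.Dataset.from_spark`;
# an illustrative end-to-end call, which requires a live Spark session:
# from pyspark.sql import SparkSession
# import datasets
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([("hello",), ("world",)], ["text"])
# ds = datasets.Dataset.from_spark(df)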
| 96
|
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCamelCase__ ( _A=None , _A=None ):
return field(default_factory=lambda: default , metadata=_A )
@dataclass
class a__:
lowercase__ = field(
metadata={"""help""": """The csv file to plot."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
lowercase__ = list_field(
default=lowerCamelCase__ , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} )
def lowerCamelCase__ ( _A ):
try:
int(_A )
return True
except ValueError:
return False
def lowerCamelCase__ ( _A ):
try:
float(_A )
return True
except ValueError:
return False
class a__:
def __init__( self : Union[str, Any] , __snake_case : Optional[int] ):
a : int = args
a : Dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
a : List[Any] = csv.DictReader(__snake_case )
for row in reader:
a : Dict = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
a : Dict = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
a : int = float(row['result'] )
def lowercase_ ( self : int ):
a , a : Dict = plt.subplots()
a : int = 'Time usage' if self.args.is_time else 'Memory usage'
a : int = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
a : Dict = sorted(set(self.result_dict[model_name]['bsz'] ) )
a : Any = sorted(set(self.result_dict[model_name]['seq_len'] ) )
a : Any = self.result_dict[model_name]['result']
((a) , (a)) : Optional[Any] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
a : int = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
a : Dict = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__snake_case , )
else:
a : int = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((a) , (a)) : Optional[int] = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
a : List[str] = np.asarray(__snake_case , __snake_case )[: len(__snake_case )]
plt.scatter(
__snake_case , __snake_case , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(__snake_case , __snake_case , '--' )
title_str += F""" {label_model_name} vs."""
a : List[Any] = title_str[:-4]
a : Optional[Any] = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(__snake_case )
plt.xlabel(__snake_case )
plt.ylabel(__snake_case )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowerCamelCase__ ( ):
a : Dict = HfArgumentParser(_A )
a : List[str] = parser.parse_args_into_dataclasses()[0]
a : Dict = Plot(args=_A )
plot.plot()
if __name__ == "__main__":
main()
| 96
| 1
|
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams, ignoring case and spaces."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ : str = input("""Enter the first string """).strip()
a_ : Dict = input("""Enter the second string """).strip()
a_ : Dict = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
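# Aside (editor's sketch): an equivalent check using collections.Counter; like
# check_anagrams it runs in O(n) time over the normalized strings.
def check_anagrams_with_counter(first_str: str, second_str: str) -> bool:
    from collections import Counter
    def normalize(s: str) -> str:
        return s.lower().strip().replace(" ", "")
    return Counter(normalize(first_str)) == Counter(normalize(second_str))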
| 55
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
__magic_name__ : Tuple = None
if self.use_labels:
__magic_name__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__magic_name__ : str = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : List[str] = BeitModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : Union[str, Any] = BeitForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Tuple = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : Union[str, Any] = self.type_sequence_label_size
__magic_name__ : List[Any] = BeitForImageClassification(_a )
model.to(_a )
model.eval()
__magic_name__ : Any = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : List[Any] = 1
__magic_name__ : str = BeitForImageClassification(_a )
model.to(_a )
model.eval()
__magic_name__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Tuple = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : Dict = self.num_labels
__magic_name__ : Optional[int] = BeitForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__magic_name__ : str = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__magic_name__ : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = config_and_inputs
__magic_name__ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = BeitModelTester(self )
__magic_name__ : Dict = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : str = model_class(_a )
__magic_name__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Union[str, Any] = [*signature.parameters.keys()]
__magic_name__ : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
def SCREAMING_SNAKE_CASE ( self ):
if not self.model_tester.is_training:
return
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_a ), BeitForMaskedImageModeling]:
continue
__magic_name__ : Any = model_class(_a )
model.to(_a )
model.train()
__magic_name__ : Optional[Any] = self._prepare_for_class(_a , _a , return_labels=_a )
__magic_name__ : List[Any] = model(**_a ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__magic_name__ : List[str] = False
__magic_name__ : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_a ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__magic_name__ : Union[str, Any] = model_class(_a )
model.gradient_checkpointing_enable()
model.to(_a )
model.train()
__magic_name__ : List[str] = self._prepare_for_class(_a , _a , return_labels=_a )
__magic_name__ : Union[str, Any] = model(**_a ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = _config_zero_init(_a )
for model_class in self.all_model_classes:
__magic_name__ : str = model_class(config=_a )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Any = BeitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(_a )
__magic_name__ : int = self.default_image_processor
__magic_name__ : Tuple = prepare_img()
__magic_name__ : Union[str, Any] = image_processor(images=_a , return_tensors="pt" ).pixel_values.to(_a )
# prepare bool_masked_pos
__magic_name__ : Any = torch.ones((1, 196) , dtype=torch.bool ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : Optional[Any] = model(pixel_values=_a , bool_masked_pos=_a )
__magic_name__ : List[str] = outputs.logits
# verify the logits
__magic_name__ : List[Any] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , _a )
__magic_name__ : List[Any] = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(_a )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _a , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(_a )
__magic_name__ : Any = self.default_image_processor
__magic_name__ : List[str] = prepare_img()
__magic_name__ : List[Any] = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : Optional[Any] = model(**_a )
__magic_name__ : List[Any] = outputs.logits
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , _a )
__magic_name__ : List[Any] = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1e-4 ) )
__magic_name__ : Union[str, Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
_a )
__magic_name__ : List[str] = self.default_image_processor
__magic_name__ : Union[str, Any] = prepare_img()
__magic_name__ : Optional[int] = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**_a )
__magic_name__ : Optional[int] = outputs.logits
# verify the logits
__magic_name__ : str = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , _a )
__magic_name__ : Union[str, Any] = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1e-4 ) )
__magic_name__ : Tuple = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
__magic_name__ : Optional[Any] = model.to(_a )
__magic_name__ : Union[str, Any] = BeitImageProcessor(do_resize=_a , size=640 , do_center_crop=_a )
__magic_name__ : str = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__magic_name__ : str = Image.open(ds[0]["file"] )
__magic_name__ : Dict = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : Dict = model(**_a )
__magic_name__ : str = outputs.logits
# verify the logits
__magic_name__ : Dict = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _a )
__magic_name__ : Any = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
__magic_name__ : Dict = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=_a , )
else:
__magic_name__ : Union[str, Any] = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
__magic_name__ : str = model.to(_a )
__magic_name__ : List[str] = BeitImageProcessor(do_resize=_a , size=640 , do_center_crop=_a )
__magic_name__ : str = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__magic_name__ : Any = Image.open(ds[0]["file"] )
__magic_name__ : Any = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : List[str] = model(**_a )
__magic_name__ : Any = outputs.logits.detach().cpu()
__magic_name__ : Tuple = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(500, 300)] )
__magic_name__ : Dict = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _a )
__magic_name__ : int = image_processor.post_process_semantic_segmentation(outputs=_a )
__magic_name__ : Dict = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _a )
| 41
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer connects the input nodes
        # with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights, ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights, ))
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Propagate the output error backwards and update all three weight matrices."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output), )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T, )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer), )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T, )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T, )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer), )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
for iteration in range(1 , iterations + 1 ):
__magic_name__ : Any = self.feedforward()
self.back_propagation()
if give_loss:
__magic_name__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : str = input_arr
__magic_name__ : Optional[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__magic_name__ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__magic_name__ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase_ ( _snake_case : numpy.ndarray ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase_ ( _snake_case : numpy.ndarray ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
__magic_name__ : Tuple = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__magic_name__ : List[str] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__magic_name__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_snake_case , output_array=_snake_case )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
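# A minimal usage sketch (an assumption on my part, not from the original file):
# with many more training iterations the same network fits the mapping above well.
#
#     nn = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
#     nn.train(output=output, iterations=5000, give_loss=True)
#     nn.predict(numpy.array([0, 1, 1], dtype=numpy.float64))  # expected: 0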
| 41
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.00_78E-01,
-3.83_23E-04,
-1.26_81E-01,
-1.14_62E-01,
2.00_95E-01,
1.08_93E-01,
-8.82_47E-02,
-3.03_61E-01,
-9.86_44E-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
            expected_output_slice = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available(), reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)

@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available(), reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)

@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 64
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates alphanumeric character fraction of file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking for keywords in the first few lines of the file."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file,
    2. counting the occurrences of the words "config" and "test" relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and files with few keywords are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
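# A hypothetical invocation sketch (argument names assumed from the companion
# PreprocessingArguments dataclass in arguments.py; adjust paths to your setup):
#
#     python preprocessing.py --dataset_name transformersbook/codeparrot \
#         --tokenizer_dir gpt2 --output_dir codeparrot-clean --near_deduplication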
| 64
| 1
|
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """Return the result of a bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
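# Worked example (illustrative, not part of the original file):
#   binary_or(25, 32): 25 -> "11001" and 32 -> "100000"; zfill aligns them to
#   "011001" and "100000"; the per-position OR gives "111001", so the function
#   returns "0b111001".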
| 275
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 275
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
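# A minimal usage sketch (an assumption, not part of the original module):
#
#     from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast
#     image_processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
#     tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
#     processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
#     encoding = processor(images=image, return_tensors="pt")  # `image` is a PIL.Image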
| 116
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
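# A minimal usage sketch (an assumption, not part of the original module):
#
#     import numpy as np
#     extractor = ClapFeatureExtractor()
#     audio = np.random.randn(48_000 * 3)  # 3 s of fake mono audio at 48 kHz
#     features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
#     # with the default "fusion" truncation this yields a (1, 4, frames, 64) tensor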
| 116
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 358
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
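# A minimal usage sketch (the checkpoint name is an assumption; M-CLIP publishes
# several such checkpoints on the Hub):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     batch = tokenizer(["une photo d'un chat"], return_tensors="pt")
#     text_embeds, _ = model(batch["input_ids"], batch["attention_mask"])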
| 37
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ ):
A_ : Tuple = """swin"""
A_ : int = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__(self : List[Any] , a__ : Union[str, Any]=224 , a__ : Union[str, Any]=4 , a__ : Dict=3 , a__ : Union[str, Any]=96 , a__ : List[str]=[2, 2, 6, 2] , a__ : Dict=[3, 6, 12, 24] , a__ : str=7 , a__ : Any=4.0 , a__ : List[Any]=True , a__ : int=0.0 , a__ : List[Any]=0.0 , a__ : List[Any]=0.1 , a__ : str="gelu" , a__ : Union[str, Any]=False , a__ : Tuple=0.0_2 , a__ : Dict=1E-5 , a__ : Tuple=32 , a__ : int=None , a__ : List[str]=None , **a__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**_a )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(_a )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(_a ) - 1) )
__snake_case = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
__snake_case = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
A_ : str = version.parse('1.11' )
@property
def a (self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a (self : int ):
"""simple docstring"""
return 1E-4
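
# Hedged usage sketch (standalone; assumes the `transformers` package, which
# ships the public version of the class defined above):
#
#   from transformers import SwinConfig
#   config = SwinConfig()
#   config.hidden_size   # 768 == embed_dim * 2 ** (len(depths) - 1) == 96 * 8
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']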
"""Utilities to dynamically load pipeline objects from the Hub or from GitHub."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the cache directory for modules with an init, and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a dynamic module under the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Get the list of modules that are relatively imported in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Get the list of all files needed by a module, recursing through its relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check that the current Python environment contains all the libraries imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import a module from the dynamic modules cache and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Retrieve the single pipeline class in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download `module_file` from a repo (or grab a local file), then cache it in the dynamic modules cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Extract a class from a module file, present in a local folder or in a repo on the Hub."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
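
# Hedged usage sketch: standalone, assuming this module is importable as
# `diffusers.utils.dynamic_modules_utils` and that network access is available.
# The community pipeline name below is illustrative.
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

pipeline_cls = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
)
print(pipeline_cls.__name__)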
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check the arguments in `__init__` of `config_class` and return the ones never used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check the arguments in `__init__` of all configuration classes and raise if some are unused."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
"""A pure Python implementation of the merge sort algorithm."""


def merge_sort(collection: list) -> list:
    """
    Sorts a mutable collection of comparable items in ascending order.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid_index = len(collection) // 2
    return merge(merge_sort(collection[:mid_index]), merge_sort(collection[mid_index:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
"""Testing suite for the PyTorch CvT model."""


import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
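
# Worked example of the resolution arithmetic used in create_and_check_model
# above, with the tester defaults (image_size=64, patch_sizes=[7, 3, 3],
# patch_stride=[4, 2, 2], patch_padding=[2, 1, 1]):
#
#   stage 0: floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16
#   stage 1: floor((16 + 2*1 - 3) / 2 + 1) = floor(8.5)   = 8
#   stage 2: floor((8  + 2*1 - 3) / 2 + 1) = floor(4.5)   = 4
#
# so the expected last_hidden_state shape is (batch_size, 96, 4, 4).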
"""Fast tokenization class for HerBERT."""
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
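
# Hedged illustration of the special-token layout built above, with placeholder
# ids (cls_token_id=0, sep_token_id=2) rather than a real vocabulary:
#
#   build_inputs_with_special_tokens([5, 6])            -> [0, 5, 6, 2]
#   build_inputs_with_special_tokens([5, 6], [7])       -> [0, 5, 6, 2, 7, 2]
#   create_token_type_ids_from_sequences([5, 6], [7])   -> [0, 0, 0, 0, 1, 1]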
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any model with a causal language modeling head. This pipeline predicts the
    words that will follow a specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
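
# Hedged usage sketch: the public entry point for this class is the `pipeline`
# factory. "gpt2" is an illustrative checkpoint and the generated text is
# model-dependent; this downloads weights on first use.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I'm a language model,", max_new_tokens=20))
# -> [{'generated_text': "Hello, I'm a language model, ..."}]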
"""Tokenization class for Speech2Text2."""

import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """
    Return the set of symbol pairs in a word. `word` is represented as a tuple of symbols (symbols being
    variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n  " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of output tokens into a single string."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
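
# Hedged illustration of the "@@ " continuation convention handled above, with
# made-up tokens: `bpe` splits a word into subwords whose non-final pieces end
# in "@@ ", and `convert_tokens_to_string` concatenates them back:
#
#   convert_tokens_to_string(["lo@@", "wer", "newest"])  # -> "lower newest"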
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
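
# Hedged note on the demo heuristic above: it is the Manhattan distance
# abs(i - goal[0]) + abs(j - goal[1]), e.g. heuristic[0][0] = 4 + 5 = 9 for the
# 5x6 grid, and obstacle cells are bumped to 99 so expansion steers around them.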
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`):
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, or `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """
        >>> tuple(SortedLinkedList(test_data_odd)) == tuple(sorted(test_data_odd))
        True
        """
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """
        >>> len(SortedLinkedList(test_data_odd))
        8
        """
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """
        >>> str(SortedLinkedList([1, 2, 3]))
        '1 -> 2 -> 3'
        """
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """
    >>> merged = merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
    >>> len(merged)
    16
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
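
# Hedged usage sketch of the pipeline exercised above (needs a CUDA GPU and the
# `k-diffusion` package installed; checkpoint and scheduler mirror the tests):
#
#   pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
#   pipe = pipe.to("cuda")
#   pipe.set_scheduler("sample_dpmpp_2m")
#   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]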
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    '''simple docstring'''
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    '''simple docstring'''
    shards_indices_per_group: List[range] = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    '''simple docstring'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    '''simple docstring'''
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
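# Hedged worked example (added for illustration): distributing 5 shards over
# 2 jobs gives the first job one extra shard, since 5 // 2 == 2 and 5 % 2 == 1.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]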
| 79
| 1
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase (UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int ):
'''simple docstring'''
# Initialise PyTorch model
_lowerCAmelCase : Optional[int] = MobileBertConfig.from_json_file(UpperCamelCase_ )
print(F"Building PyTorch model from configuration: {config}" )
_lowerCAmelCase : Tuple = MobileBertForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
_lowerCAmelCase : str = load_tf_weights_in_mobilebert(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
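# Hedged example invocation (script name and paths are hypothetical, added for
# illustration):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin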
| 159
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCamelCase : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int ):
'''simple docstring'''
for attribute in key.split(""".""" ):
_lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
_lowerCAmelCase : Optional[Any] = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
_lowerCAmelCase : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_lowerCAmelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
_lowerCAmelCase : str = value
elif weight_type == "bias":
_lowerCAmelCase : str = value
else:
_lowerCAmelCase : Tuple = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Tuple = fairseq_model.state_dict()
_lowerCAmelCase : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_lowerCAmelCase : Any = None
for name, value in fairseq_dict.items():
_lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase : Any = True
elif name.split(""".""" )[0] == "proj":
_lowerCAmelCase : Union[str, Any] = fairseq_model.proj
_lowerCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
_lowerCAmelCase : Union[str, Any] = name.split(UpperCamelCase_ )[0].split(""".""" )[-2]
_lowerCAmelCase : Optional[Any] = mapped_key.replace("""*""" , UpperCamelCase_ )
if "weight_g" in name:
_lowerCAmelCase : List[str] = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase : Tuple = """weight_v"""
elif "bias" in name:
_lowerCAmelCase : Dict = """bias"""
elif "weight" in name:
_lowerCAmelCase : Optional[int] = """weight"""
else:
_lowerCAmelCase : Optional[Any] = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def _UpperCAmelCase (UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase : List[str] = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : Optional[int] = name.split(""".""" )
_lowerCAmelCase : List[str] = int(items[0] )
_lowerCAmelCase : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_lowerCAmelCase : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_lowerCAmelCase : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_lowerCAmelCase : Tuple = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_lowerCAmelCase : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase_ )
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = emb.weight.shape
_lowerCAmelCase : Union[str, Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
_lowerCAmelCase : List[Any] = emb.weight.data
return lin_layer
def _UpperCAmelCase (UpperCamelCase_ : Any ):
'''simple docstring'''
with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as f:
_lowerCAmelCase : Optional[int] = f.readlines()
_lowerCAmelCase : Dict = [line.split(""" """ )[0] for line in lines]
_lowerCAmelCase : Dict = len(UpperCamelCase_ )
_lowerCAmelCase : Optional[int] = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(UpperCamelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , ):
'''simple docstring'''
_lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(UpperCamelCase_ )
_lowerCAmelCase : Union[str, Any] = SpeechaTextaConfig.from_pretrained(
UpperCamelCase_ , vocab_size=UpperCamelCase_ , decoder_layers=UpperCamelCase_ , do_stable_layer_norm=UpperCamelCase_ )
_lowerCAmelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_lowerCAmelCase : Tuple = model[0].eval()
# set weights for wav2vec2 encoder
_lowerCAmelCase : Union[str, Any] = WavaVecaModel(UpperCamelCase_ )
_lowerCAmelCase : Union[str, Any] = recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
_lowerCAmelCase : List[str] = SpeechaTextaForCausalLM(UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
_lowerCAmelCase : Dict = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
_lowerCAmelCase : List[Any] = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
_lowerCAmelCase : Any = False
# add projection layer
_lowerCAmelCase : List[Any] = nn.Parameter(projection_layer.weight )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(projection_layer.bias )
_lowerCAmelCase : Any = create_vocab_dict(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , """vocab.json""" ) , """w""" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : Any = SpeechaTextaTokenizer(os.path.join(UpperCamelCase_ , """vocab.json""" ) )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowerCAmelCase : str = hf_wavavec.config.to_dict()
_lowerCAmelCase : Any = tokenizer.pad_token_id
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : Any = tokenizer.eos_token_id
_lowerCAmelCase : Union[str, Any] = """speech_to_text_2"""
_lowerCAmelCase : Any = """wav2vec2"""
_lowerCAmelCase : str = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
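# Hedged example invocation (script name and paths are hypothetical; the two
# config-path flags fall back to the defaults declared above when omitted):
#   python convert_wav2vec2_seq2seq_original_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --pytorch_dump_folder_path ./s2t-wav2vec2-converted \
#       --dict_path ./dict.ltr.txt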
| 159
| 1
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class A_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase__ = 1
@register_to_config
def __init__(self :Optional[int] , _UpperCamelCase :int = 1000 , _UpperCamelCase :Optional[Union[np.ndarray, List[float]]] = None )-> Optional[int]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(_UpperCamelCase )
# standard deviation of the initial noise distribution
__A = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__A = 4
# running values
__A = []
def _lowerCAmelCase (self :str , _UpperCamelCase :int , _UpperCamelCase :Union[str, torch.device] = None )-> Any:
__A = num_inference_steps
__A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__A = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__A = torch.sin(steps * math.pi / 2 ) ** 2
__A = (1.0 - self.betas**2) ** 0.5
__A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__A = timesteps.to(_UpperCamelCase )
__A = []
def _lowerCAmelCase (self :int , _UpperCamelCase :torch.FloatTensor , _UpperCamelCase :int , _UpperCamelCase :torch.FloatTensor , _UpperCamelCase :bool = True , )-> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
__A = (self.timesteps == timestep).nonzero().item()
__A = timestep_index + 1
__A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_UpperCamelCase )
if len(self.ets ) == 1:
__A = self.ets[-1]
elif len(self.ets ) == 2:
__A = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__A = self._get_prev_sample(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCamelCase )
def _lowerCAmelCase (self :str , _UpperCamelCase :torch.FloatTensor , *_UpperCamelCase :Optional[Any] , **_UpperCamelCase :Optional[int] )-> torch.FloatTensor:
return sample
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :Tuple , _UpperCamelCase :List[str] , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Any )-> Any:
__A = self.alphas[timestep_index]
__A = self.betas[timestep_index]
__A = self.alphas[prev_timestep_index]
__A = self.betas[prev_timestep_index]
__A = (sample - sigma * ets) / max(_UpperCamelCase , 1e-8 )
__A = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__(self :Union[str, Any] )-> Optional[int]:
return self.config.num_train_timesteps
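# Hedged sketch of the Adams-Bashforth weights used in the `step` branches
# above (plain floats, added for illustration): with the four most recent
# estimates e1..e4, the 4th-order combination is
# (55*e1 - 59*e2 + 37*e3 - 9*e4) / 24, and for a constant signal it reduces
# to e1 because 55 - 59 + 37 - 9 == 24.
assert (55 - 59 + 37 - 9) / 24 == 1.0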
| 117
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
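# Hedged worked example (toy 4x4 input, added for illustration): with size=2
# and stride=2 the output shape is (4 - 2) // 2 + 1 = 2, and each output cell
# reduces one non-overlapping 2x2 window.
assert maxpooling(np.arange(1, 17).reshape(4, 4), size=2, stride=2).tolist() == [[6.0, 8.0], [14.0, 16.0]]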
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 117
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = StableUnCLIPPipeline
UpperCAmelCase : int = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase : int = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase : str = False
def lowerCAmelCase_ ( self : Dict ):
_A = 32
_A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCAmelCase , projection_dim=_UpperCAmelCase , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
_A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_UpperCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_A = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_000 , clip_sample=_UpperCAmelCase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
_A = StableUnCLIPImageNormalizer(embedding_dim=_UpperCAmelCase )
_A = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
_A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_UpperCAmelCase , layers_per_block=1 , upcast_attention=_UpperCAmelCase , use_linear_projection=_UpperCAmelCase , )
torch.manual_seed(0 )
_A = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_A = AutoencoderKL()
_A = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_A = torch.manual_seed(_UpperCAmelCase )
else:
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self : str ):
_A = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_A = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=_UpperCAmelCase )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
_A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
        _A = pipe('anime turtle' , generator=_UpperCAmelCase , output_type='np' )
_A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_A = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 271
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Union[str, Any]="binary" , _UpperCAmelCase : List[str]=None ):
_A = fa_score(
_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase , pos_label=_UpperCAmelCase , average=_UpperCAmelCase , sample_weight=_UpperCAmelCase )
return {"f1": float(_UpperCAmelCase ) if score.size == 1 else score}
| 271
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : List[Any] =logging.get_logger(__name__)
_A : Any ={
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _lowercase ( _lowercase ):
a = """biogpt"""
def __init__( self: List[str] , UpperCamelCase__: str=42_384 , UpperCamelCase__: Optional[Any]=1_024 , UpperCamelCase__: List[Any]=24 , UpperCamelCase__: Dict=16 , UpperCamelCase__: Optional[int]=4_096 , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: Optional[int]=0.1 , UpperCamelCase__: List[Any]=1_024 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: Union[str, Any]=1e-12 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: Any=1 , UpperCamelCase__: str=0 , UpperCamelCase__: Optional[int]=2 , **UpperCamelCase__: List[str] , ):
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Union[str, Any] = layer_norm_eps
lowerCamelCase__ : int = scale_embedding
lowerCamelCase__ : Tuple = use_cache
lowerCamelCase__ : Tuple = layerdrop
lowerCamelCase__ : str = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 41
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
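# Hedged check of the elimination step above (added for clarity): substituting
# c = n - a - b into a**2 + b**2 = c**2 cancels a**2 + b**2 and leaves
# 2*b*(n - a) = n*n - 2*a*n, i.e. b = (n*n - 2*a*n) // (2*n - 2*a).
# For n = 12 and a = 3: b = (144 - 72) // (24 - 6) = 4, so c = 5 (the 3-4-5 triple).
assert (12 * 12 - 2 * 3 * 12) // (2 * 12 - 2 * 3) == 4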
if __name__ == "__main__":
print(F'{solution() = }')
| 41
| 1
|
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
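# Hedged worked example (value chosen for illustration): for "18745" the greedy
# pass over the Indian denominations takes nine 2000s, then 500, 100, 100,
# 20, 20 and 5.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "18745") == [
    2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 500, 100, 100, 20, 20, 5,
]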
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 280
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 280
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Optional[int] = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'xlm'
_a = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : Any, lowerCamelCase : Optional[int]=3_0145, lowerCamelCase : Any=2048, lowerCamelCase : str=12, lowerCamelCase : List[str]=16, lowerCamelCase : Any=0.1, lowerCamelCase : Tuple=0.1, lowerCamelCase : List[Any]=True, lowerCamelCase : List[Any]=False, lowerCamelCase : Optional[Any]=False, lowerCamelCase : str=False, lowerCamelCase : Dict=1, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Optional[int]=512, lowerCamelCase : int=2048**-0.5, lowerCamelCase : Union[str, Any]=1E-12, lowerCamelCase : Tuple=0.02, lowerCamelCase : List[str]=0, lowerCamelCase : str=1, lowerCamelCase : Tuple=2, lowerCamelCase : List[str]=3, lowerCamelCase : Any=5, lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple="first", lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Any=True, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : str=5, lowerCamelCase : Tuple=5, lowerCamelCase : Any=0, lowerCamelCase : str=0, lowerCamelCase : str=2, lowerCamelCase : Optional[Any]=0, **lowerCamelCase : Dict, )-> str:
lowerCamelCase__ : Any =vocab_size
lowerCamelCase__ : Union[str, Any] =emb_dim
lowerCamelCase__ : Union[str, Any] =n_layers
lowerCamelCase__ : Optional[Any] =n_heads
lowerCamelCase__ : Optional[int] =dropout
lowerCamelCase__ : Tuple =attention_dropout
lowerCamelCase__ : Optional[Any] =gelu_activation
lowerCamelCase__ : Any =sinusoidal_embeddings
lowerCamelCase__ : List[Any] =causal
lowerCamelCase__ : List[str] =asm
lowerCamelCase__ : Optional[Any] =n_langs
lowerCamelCase__ : Optional[Any] =use_lang_emb
lowerCamelCase__ : int =layer_norm_eps
lowerCamelCase__ : Union[str, Any] =bos_index
lowerCamelCase__ : List[Any] =eos_index
lowerCamelCase__ : Dict =pad_index
lowerCamelCase__ : Dict =unk_index
lowerCamelCase__ : Tuple =mask_index
lowerCamelCase__ : Dict =is_encoder
lowerCamelCase__ : Optional[Any] =max_position_embeddings
lowerCamelCase__ : List[Any] =embed_init_std
lowerCamelCase__ : int =init_std
lowerCamelCase__ : Optional[int] =summary_type
lowerCamelCase__ : Optional[Any] =summary_use_proj
lowerCamelCase__ : List[str] =summary_activation
lowerCamelCase__ : str =summary_proj_to_labels
lowerCamelCase__ : Dict =summary_first_dropout
lowerCamelCase__ : Optional[int] =start_n_top
lowerCamelCase__ : Any =end_n_top
lowerCamelCase__ : Tuple =mask_token_id
lowerCamelCase__ : Optional[Any] =lang_id
if "n_words" in kwargs:
lowerCamelCase__ : Dict =kwargs['''n_words''']
super().__init__(pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, **lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def snake_case ( self : str )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : str ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : List[Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 238
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : int = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'deberta-v2'
def __init__( self : Optional[Any], lowerCamelCase : Optional[int]=12_8100, lowerCamelCase : List[Any]=1536, lowerCamelCase : Dict=24, lowerCamelCase : Any=24, lowerCamelCase : Union[str, Any]=6144, lowerCamelCase : List[Any]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : Union[str, Any]=512, lowerCamelCase : Optional[Any]=0, lowerCamelCase : Any=0.02, lowerCamelCase : int=1E-7, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Union[str, Any]=-1, lowerCamelCase : Tuple=0, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : int=None, lowerCamelCase : Dict=0, lowerCamelCase : Tuple="gelu", **lowerCamelCase : Optional[int], )-> Union[str, Any]:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : str =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Optional[Any] =num_attention_heads
lowerCamelCase__ : List[Any] =intermediate_size
lowerCamelCase__ : int =hidden_act
lowerCamelCase__ : Tuple =hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] =max_position_embeddings
lowerCamelCase__ : int =type_vocab_size
lowerCamelCase__ : Tuple =initializer_range
lowerCamelCase__ : Tuple =relative_attention
lowerCamelCase__ : Optional[Any] =max_relative_positions
lowerCamelCase__ : List[Any] =pad_token_id
lowerCamelCase__ : int =position_biased_input
# Backwards compatibility
if type(lowerCamelCase ) == str:
lowerCamelCase__ : Union[str, Any] =[x.strip() for x in pos_att_type.lower().split('''|''' )]
lowerCamelCase__ : Tuple =pos_att_type
lowerCamelCase__ : Union[str, Any] =vocab_size
lowerCamelCase__ : Optional[int] =layer_norm_eps
lowerCamelCase__ : Dict =kwargs.get('''pooler_hidden_size''', lowerCamelCase )
lowerCamelCase__ : Tuple =pooler_dropout
lowerCamelCase__ : List[Any] =pooler_hidden_act
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def snake_case ( self : List[str] )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : Union[str, Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : Any ={0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def snake_case ( self : List[str] )-> int:
return 12
def snake_case ( self : str, lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional["TensorType"] = None, lowerCamelCase : int = 3, lowerCamelCase : int = 40, lowerCamelCase : int = 40, lowerCamelCase : "PreTrainedTokenizerBase" = None, )-> Mapping[str, Any]:
lowerCamelCase__ : List[Any] =super().generate_dummy_inputs(preprocessor=lowerCamelCase, framework=lowerCamelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 238
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """simple docstring"""
    def __init__(self) -> None:
        self.connections = {}
    def add_node(self, node: str) -> None:
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
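# Hedged usage sketch (toy probabilities, added for illustration): every node
# starts at count 1 via Counter(graph.get_nodes()), and each of the `steps`
# random draws increments the destination node, so for
#   get_transitions("a", [("a", "a", 0.3), ("a", "b", 0.7), ("b", "a", 1.0)], 3)
# the returned counts over {"a", "b"} always sum to 2 + 3 = 5.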
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370
|
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")
    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )
    return dynamic_programming(1)
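# Hedged worked example (a classic instance of this problem, added for
# illustration): a 7-day pass covers days 4-8 and 1-day passes cover days 1
# and 20, for a total of 2 + 7 + 2 = 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11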
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176
| 0
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str]=0.999 , __snake_case : str="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
UpperCAmelCase_ : Dict = []
for i in range(__snake_case ):
UpperCAmelCase_ : Dict = i / num_diffusion_timesteps
UpperCAmelCase_ : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
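# Hedged numeric check of the cosine schedule above (self-contained; T and
# max_beta are assumed demo values, not from this file): each
# beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta), and
# since alpha_bar decreases on [0, 1], every beta lies in (0, max_beta].
_T, _max_beta = 4, 0.999
_alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
_betas_demo = [min(1 - _alpha_bar((i + 1) / _T) / _alpha_bar(i / _T), _max_beta) for i in range(_T)]
assert all(0.0 < b <= _max_beta for b in _betas_demo)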
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
_snake_case : int = 2
@register_to_config
def __init__( self , _UpperCamelCase = 1_0_0_0 , _UpperCamelCase = 0.0_00_85 , _UpperCamelCase = 0.0_12 , _UpperCamelCase = "linear" , _UpperCamelCase = None , _UpperCamelCase = "epsilon" , _UpperCamelCase = "linspace" , _UpperCamelCase = 0 , ) -> Optional[Any]:
if trained_betas is not None:
UpperCAmelCase_ : Any = torch.tensor(_UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ : Optional[int] = torch.linspace(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ : str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ : List[str] = betas_for_alpha_bar(_UpperCamelCase )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
UpperCAmelCase_ : Dict = 1.0 - self.betas
UpperCAmelCase_ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[Any]:
if schedule_timesteps is None:
UpperCAmelCase_ : Optional[Any] = self.timesteps
UpperCAmelCase_ : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ : int = 1 if len(_UpperCamelCase ) > 1 else 0
else:
UpperCAmelCase_ : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(_UpperCamelCase ) else timestep
UpperCAmelCase_ : Optional[int] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ) -> Optional[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , ) -> torch.FloatTensor:
UpperCAmelCase_ : List[Any] = self.index_for_timestep(_UpperCamelCase )
if self.state_in_first_order:
UpperCAmelCase_ : Dict = self.sigmas[step_index]
else:
UpperCAmelCase_ : Union[str, Any] = self.sigmas_interpol[step_index]
UpperCAmelCase_ : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , ) -> List[str]:
UpperCAmelCase_ : Dict = num_inference_steps
UpperCAmelCase_ : str = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ : List[Any] = np.linspace(0 , num_train_timesteps - 1 , _UpperCamelCase , dtype=_UpperCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : List[Any] = (np.arange(0 , _UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : List[str] = (np.arange(_UpperCamelCase , 0 , -step_ratio )).round().copy().astype(_UpperCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
UpperCAmelCase_ : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ : List[str] = torch.from_numpy(np.log(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = np.interp(_UpperCamelCase , np.arange(0 , len(_UpperCamelCase ) ) , _UpperCamelCase )
UpperCAmelCase_ : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ : str = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase )
# interpolate sigmas
UpperCAmelCase_ : Optional[int] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
UpperCAmelCase_ : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ : Optional[int] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_UpperCamelCase ).startswith('mps' ):
# mps does not support float64
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_UpperCamelCase ).to(_UpperCamelCase , dtype=torch.floataa )
else:
UpperCAmelCase_ : Any = torch.from_numpy(_UpperCamelCase ).to(_UpperCamelCase )
# interpolate timesteps
UpperCAmelCase_ : Optional[int] = self.sigma_to_t(_UpperCamelCase ).to(_UpperCamelCase , dtype=timesteps.dtype )
UpperCAmelCase_ : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
UpperCAmelCase_ : Tuple = torch.cat([timesteps[:1], interleaved_timesteps] )
UpperCAmelCase_ : Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ : Dict = defaultdict(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> str:
# get log sigma
UpperCAmelCase_ : List[str] = sigma.log()
# get distribution
UpperCAmelCase_ : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
UpperCAmelCase_ : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
UpperCAmelCase_ : List[str] = low_idx + 1
UpperCAmelCase_ : List[str] = self.log_sigmas[low_idx]
UpperCAmelCase_ : int = self.log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ : Optional[Any] = (low - log_sigma) / (low - high)
UpperCAmelCase_ : Any = w.clamp(0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ : Union[str, Any] = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ : Optional[Any] = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ) -> int:
return self.sample is None
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self) -> int:
        return self.config.num_train_timesteps
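
# Hedged usage sketch (added; not part of the original scheduler file). It assumes a
# scheduler instance of the class above plus a `unet` denoiser callable; the variable
# names and the latent shape are illustrative.
#
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample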
"""Conversion script for the LDM checkpoints."""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """
    Final conversion step: applies a global renaming to locally converted weights and
    splits attention layers into query/key/value tensors where needed.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes an LDM state dict and a config, and returns a converted diffusers checkpoint.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
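
# Hedged usage sketch (added for illustration; script name and paths are placeholders,
# not from the original file):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model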
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX-Algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
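
# Hedged note (added): for the demo graph above, the greedy pass returns {0, 1, 2, 4}.
# Picking the highest-degree vertex first is a classic heuristic, but it is not optimal
# in general; max-degree greedy is only an O(log n)-approximation for vertex cover.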
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
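
# Hedged usage sketch (added for illustration): the pure-Python round trip should
# agree with the standard library's implementation.
#
#   import base64
#   assert base64_encode(b"Hello World!") == base64.b64encode(b"Hello World!")  # b"SGVsbG8gV29ybGQh"
#   assert base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"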
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast CodeGen tokenizer, backed by HuggingFace's *tokenizers* library (byte-level BPE).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
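
# Hedged usage sketch (added): `truncate_before_pattern` trims a sampled completion at
# the first match of any supplied regex, which is handy when sampling a single function.
# The checkpoint name is real; the prompt and patterns are illustrative.
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def hello_world():", return_tensors="np").input_ids
#   text = tokenizer.decode(ids[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])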
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12) -> jnp.ndarray:
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape=None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
import math
def proth(number: int) -> int:
    """
    Calculate the number-th Proth number.

    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers are generated block by block: block n contributes the values
        # 2 ** (n + 1) + (previously generated Proth numbers).
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
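
# Hedged note (added): the first ten Proth numbers produced by this generator are
# 3, 5, 9, 13, 17, 25, 33, 41, 49, 57 -- numbers of the form k * 2**n + 1 with odd k < 2**n.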
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to mark arrow keys
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by the CLI to build a ConvertCommand from the parsed arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = (
    "\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it"
    " requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation"
    " instructions.\n"
)
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl,"
                " xlnet, xlm, lxmert, rembert]"
            )
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: outputs 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
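
# Hedged note (added): truth table for XNOR is (0,0) -> 1, (0,1) -> 0, (1,0) -> 0,
# (1,1) -> 1; an equivalent one-liner for 0/1 inputs is `int(not (input_1 ^ input_2))`.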
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
UpperCamelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCamelCase__ : Any = '''A red cartoon frog, 4k'''
UpperCamelCase__ : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
UpperCamelCase__ : Dict = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
UpperCamelCase__ : Any = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ , UpperCamelCase__ : str = pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCamelCase__ : int = pipeline(
lowerCamelCase__ , image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
UpperCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
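# A condensed view of the two-stage flow exercised by the slow test above
# (assumption: KandinskyPriorPipeline plus the img2img pipeline loaded above;
# argument names follow the calls in the test itself):
#
#   image_embeds, negative_image_embeds = pipe_prior(prompt).to_tuple()
#   image = pipeline(prompt, image=init_image, image_embeds=image_embeds,
#                    negative_image_embeds=negative_image_embeds).images[0]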
| 146
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCamelCase : str = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE : torch.nn.Module , SCREAMING_SNAKE_CASE : BnbQuantizationConfig , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = bnb_quantization_config.load_in_abit
UpperCamelCase__ : List[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
UpperCamelCase__ : int = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
UpperCamelCase__ : int = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase__ : List[Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE )
# compatibility with peft
UpperCamelCase__ : Optional[Any] = load_in_abit
UpperCamelCase__ : List[str] = load_in_abit
UpperCamelCase__ : str = get_parameter_device(SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
UpperCamelCase__ : Union[str, Any] = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
# convert param to the right dtype
UpperCamelCase__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase__ : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE ):
param.to(SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            F"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            '''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
UpperCamelCase__ : str = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase__ : Dict = True
UpperCamelCase__ : str = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE )
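# A minimal usage sketch for the quantization entry point above (assumption: this is
# accelerate's `load_and_quantize_model`; the names below follow accelerate's public
# API, `MyModel` is a hypothetical model class and the checkpoint path is a placeholder):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   quantized = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#   )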
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : str=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase__ : int = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCamelCase__ : str = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : Union[str, Any] = special_dtypes
UpperCamelCase__ : Optional[int] = no_split_module_classes
UpperCamelCase__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase__ : Dict = get_balanced_memory(
SCREAMING_SNAKE_CASE , low_zero=(device_map == '''balanced_low_0''') , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Tuple = max_memory
UpperCamelCase__ : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
UpperCamelCase__ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase__ : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
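# Example of a custom device_map accepted by the helper above (assumption: standard
# accelerate semantics, where values are GPU indices, "cpu", or "disk"; module names
# are illustrative):
#
#   device_map = {"transformer.h.0": 0, "transformer.h.1": 1, "lm_head": "cpu"}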
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : int=None ):
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ , UpperCamelCase__ : Dict = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on GitHub if you think this is'''
            ''' a bug.''' )
return model
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase__ : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase__ : Tuple = []
current_key_name.append(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase__ : int = '''.'''.join(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase__ : int = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase__ : Optional[int] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCamelCase__ : List[Any] = module.weight.data
if module.bias is not None:
UpperCamelCase__ : List[str] = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = True
if len(list(module.children() ) ) > 0:
UpperCamelCase__ , UpperCamelCase__ : Tuple = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
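# A standalone toy version of the same recursive module-replacement pattern
# (assumption: plain torch.nn, independent of bitsandbytes; `factory` is a
# hypothetical callable that builds the replacement layer):
def _replace_linears_sketch(module: nn.Module, factory):
    for name, child in module.named_children():
        if isinstance(child, nn.Linear):
            # swap the leaf Linear for whatever the factory produces
            setattr(module, name, factory(child.in_features, child.out_features))
        else:
            # recurse into composite submodules
            _replace_linears_sketch(child, factory)
    return module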
def _a ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
with init_empty_weights():
        UpperCamelCase__ : Dict = deepcopy(SCREAMING_SNAKE_CASE )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
UpperCamelCase__ : str = find_tied_parameters(SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase__ : int = sum(SCREAMING_SNAKE_CASE , [] )
UpperCamelCase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
UpperCamelCase__ : str = False
if hasattr(SCREAMING_SNAKE_CASE , '''base_model_prefix''' ):
UpperCamelCase__ : int = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase__ : Tuple = list(model.named_children() )
UpperCamelCase__ : str = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase__ : Dict = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
UpperCamelCase__ : int = ['''.weight''', '''.bias''']
UpperCamelCase__ : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase__ : int = name.replace(SCREAMING_SNAKE_CASE , '''''' )
filtered_module_names.append(SCREAMING_SNAKE_CASE )
return filtered_module_names
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _a ( SCREAMING_SNAKE_CASE : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = param_name
UpperCamelCase__ : str = model
if "." in tensor_name:
UpperCamelCase__ : List[Any] = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
UpperCamelCase__ : Optional[int] = new_module
UpperCamelCase__ : List[str] = splits[-1]
# offload weights
UpperCamelCase__ : Any = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
else:
offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
offload_weight(SCREAMING_SNAKE_CASE , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''meta''' , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
| 146
| 1
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A__ ( __snake_case ):
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , A_ , )
super().__init__(*A_ , **A_ )
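# Migration sketch (assumption: VideoMAEImageProcessor is the drop-in replacement
# named in the deprecation warning above; the checkpoint id is illustrative):
#
#   from transformers import VideoMAEImageProcessor
#   image_processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")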
| 140
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowerCamelCase : Any = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase : str = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : Tuple = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_lowerCAmelCase )[0]
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase ) -> int:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : Dict = _readaa(_lowerCAmelCase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
UpperCamelCase : Optional[int] = _readaa(_lowerCAmelCase )
UpperCamelCase : int = _readaa(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(rows * cols * num_images )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
UpperCamelCase : Optional[Any] = data.reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
return data
@deprecated(_lowerCAmelCase , "Please use tf.one_hot on tensors." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
UpperCamelCase : List[str] = labels_dense.shape[0]
UpperCamelCase : str = numpy.arange(_lowerCAmelCase ) * num_classes
UpperCamelCase : Optional[Any] = numpy.zeros((num_labels, num_classes) )
UpperCamelCase : Dict = 1
return labels_one_hot
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=10 ) -> str:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : int = _readaa(_lowerCAmelCase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
UpperCamelCase : List[str] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(_lowerCAmelCase )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase , _lowerCAmelCase )
return labels
class A__ :
@deprecated(
A_ , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : int = random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
UpperCamelCase : Optional[Any] = dtypes.as_dtype(A_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
UpperCamelCase : List[str] = 1_0000
UpperCamelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
UpperCamelCase : Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
UpperCamelCase : int = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
UpperCamelCase : str = images.astype(numpy.floataa )
UpperCamelCase : str = numpy.multiply(A_ , 1.0 / 2_55.0 )
UpperCamelCase : Optional[int] = images
UpperCamelCase : str = labels
UpperCamelCase : Optional[Any] = 0
UpperCamelCase : Optional[int] = 0
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._images
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._labels
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._num_examples
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._epochs_completed
def __UpperCamelCase( self , A_ , A_=False , A_=True ):
'''simple docstring'''
if fake_data:
UpperCamelCase : Optional[int] = [1] * 784
UpperCamelCase : Optional[Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
UpperCamelCase : Optional[Any] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
UpperCamelCase : Optional[Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
UpperCamelCase : int = self.images[perma]
UpperCamelCase : Any = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
            # Get the remaining examples in this epoch
UpperCamelCase : List[Any] = self._num_examples - start
UpperCamelCase : Union[str, Any] = self._images[start : self._num_examples]
UpperCamelCase : str = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
UpperCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
UpperCamelCase : Union[str, Any] = self.images[perm]
UpperCamelCase : Union[str, Any] = self.labels[perm]
# Start next epoch
UpperCamelCase : Tuple = 0
UpperCamelCase : Tuple = batch_size - rest_num_examples
UpperCamelCase : List[str] = self._index_in_epoch
UpperCamelCase : Dict = self._images[start:end]
UpperCamelCase : int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
UpperCamelCase : Union[str, Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
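# A minimal standalone sketch of the shuffle-per-epoch batching idea used by
# `next_batch` above (assumption: plain NumPy, independent of the deprecated class):
def _iterate_minibatches_sketch(images, labels, batch_size, seed=0):
    rng = numpy.random.RandomState(seed)
    # one fresh permutation per pass over the data
    perm = rng.permutation(len(images))
    for start in range(0, len(images), batch_size):
        idx = perm[start : start + batch_size]
        yield images[idx], labels[idx]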
@deprecated(_lowerCAmelCase , "Please write your own downloading logic." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase , _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
UpperCamelCase : Optional[int] = f.size()
print("Successfully downloaded" , _lowerCAmelCase , _lowerCAmelCase , "bytes." )
return filepath
@deprecated(
_lowerCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=dtypes.floataa , _lowerCAmelCase=True , _lowerCAmelCase=5000 , _lowerCAmelCase=None , _lowerCAmelCase=DEFAULT_SOURCE_URL , ) -> List[str]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_lowerCAmelCase , one_hot=_lowerCAmelCase , dtype=_lowerCAmelCase , seed=_lowerCAmelCase )
UpperCamelCase : Any = fake()
UpperCamelCase : List[str] = fake()
UpperCamelCase : Union[str, Any] = fake()
return _Datasets(train=_lowerCAmelCase , validation=_lowerCAmelCase , test=_lowerCAmelCase )
if not source_url: # empty string check
UpperCamelCase : str = DEFAULT_SOURCE_URL
UpperCamelCase : List[str] = "train-images-idx3-ubyte.gz"
UpperCamelCase : Optional[int] = "train-labels-idx1-ubyte.gz"
UpperCamelCase : List[str] = "t10k-images-idx3-ubyte.gz"
UpperCamelCase : Union[str, Any] = "t10k-labels-idx1-ubyte.gz"
UpperCamelCase : Optional[int] = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + train_images_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : List[str] = _extract_images(_lowerCAmelCase )
UpperCamelCase : Dict = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + train_labels_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : List[Any] = _extract_labels(_lowerCAmelCase , one_hot=_lowerCAmelCase )
UpperCamelCase : Any = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + test_images_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : Any = _extract_images(_lowerCAmelCase )
UpperCamelCase : List[str] = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + test_labels_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : str = _extract_labels(_lowerCAmelCase , one_hot=_lowerCAmelCase )
if not 0 <= validation_size <= len(_lowerCAmelCase ):
UpperCamelCase : Any = (
"Validation size should be between 0 and "
F"""{len(_lowerCAmelCase )}. Received: {validation_size}."""
)
raise ValueError(_lowerCAmelCase )
UpperCamelCase : str = train_images[:validation_size]
UpperCamelCase : int = train_labels[:validation_size]
UpperCamelCase : List[str] = train_images[validation_size:]
UpperCamelCase : Union[str, Any] = train_labels[validation_size:]
UpperCamelCase : List[str] = {"dtype": dtype, "reshape": reshape, "seed": seed}
UpperCamelCase : List[str] = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase : List[str] = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase : Any = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
return _Datasets(train=_lowerCAmelCase , validation=_lowerCAmelCase , test=_lowerCAmelCase )
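# A modern, non-deprecated equivalent of the loader above (assumption: TensorFlow 2.x,
# imported as `import tensorflow as tf`):
#
#   (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()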
| 140
| 1
|
def _lowerCAmelCase ( lowerCAmelCase_ :int , lowerCAmelCase_a :int )->str:
    '''
    Return the binary string of the bitwise OR of two non-negative integers.

    >>> _lowerCAmelCase(25, 32)
    '0b111001'
    >>> _lowerCAmelCase(0, 0)
    '0b0'
    '''
    if lowerCAmelCase_ < 0 or lowerCAmelCase_a < 0:
        raise ValueError("the value of both inputs must be non-negative" )
    snake_case_ = str(bin(lowerCAmelCase_ ) )[2:]  # remove the leading "0b"
    snake_case_a = str(bin(lowerCAmelCase_a ) )[2:]
    snake_case_max = max(len(snake_case_ ) , len(snake_case_a ) )
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(snake_case_.zfill(snake_case_max ) , snake_case_a.zfill(snake_case_max ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
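# Sanity check against Python's built-in bitwise OR (assumption: the function above
# implements OR on non-negative ints):
#
#   assert _lowerCAmelCase(25, 32) == bin(25 | 32)  # both give "0b111001"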
| 159
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = (16, 32, 96, 256)
_SCREAMING_SNAKE_CASE = jnp.floataa
def lowerCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case_ = []
for i in range(len(self.block_out_channels ) - 1 ):
snake_case_ = self.block_out_channels[i]
snake_case_ = self.block_out_channels[i + 1]
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowerCAmelCase )
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowerCAmelCase )
snake_case_ = blocks
snake_case_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = self.conv_in(_lowerCAmelCase )
snake_case_ = nn.silu(_lowerCAmelCase )
for block in self.blocks:
snake_case_ = block(_lowerCAmelCase )
snake_case_ = nn.silu(_lowerCAmelCase )
snake_case_ = self.conv_out(_lowerCAmelCase )
return embedding
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , a , a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = (320, 640, 1280, 1280)
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = 1280
_SCREAMING_SNAKE_CASE = 0.0
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = jnp.floataa
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = "rgb"
_SCREAMING_SNAKE_CASE = (16, 32, 96, 256)
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : jax.random.KeyArray ) -> FrozenDict:
"""simple docstring"""
# init input tensors
snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case_ = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
snake_case_ = jnp.ones((1,) , dtype=jnp.intaa )
snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
snake_case_ = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
snake_case_ , snake_case_ = jax.random.split(_lowerCAmelCase )
snake_case_ = {"params": params_rng, "dropout": dropout_rng}
return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"]
def lowerCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
snake_case_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case_ = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = []
snake_case_ = block_out_channels[0]
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
for _ in range(self.layers_per_block ):
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
if not is_final_block:
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
snake_case_ = down_blocks
snake_case_ = controlnet_down_blocks
# mid
snake_case_ = block_out_channels[-1]
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
snake_case_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
snake_case_ = jnp.flip(_lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(_lowerCAmelCase , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(_lowerCAmelCase , 0 )
snake_case_ = self.time_proj(_lowerCAmelCase )
snake_case_ = self.time_embedding(_lowerCAmelCase )
# 2. pre-process
snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
snake_case_ = self.conv_in(_lowerCAmelCase )
snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
snake_case_ = self.controlnet_cond_embedding(_lowerCAmelCase )
sample += controlnet_cond
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
else:
snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
snake_case_ = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
        # 5. controlnet blocks
snake_case_ = ()
for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase , self.controlnet_down_blocks ):
snake_case_ = controlnet_block(_lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
snake_case_ = controlnet_down_block_res_samples
snake_case_ = self.controlnet_mid_block(_lowerCAmelCase )
# 6. scaling
snake_case_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_lowerCAmelCase , mid_block_res_sample=_lowerCAmelCase )
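# A minimal initialization sketch (assumption: the final decorated class above
# corresponds to diffusers' FlaxControlNetModel, with the `init_weights` method
# defined earlier in the class):
#
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(jax.random.PRNGKey(0))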
| 159
| 1
|
'''simple docstring'''
import os
# Precomputes a list of the first 100 triangular numbers
UpperCAmelCase : int = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) )
__SCREAMING_SNAKE_CASE = os.path.join(a__ , """words.txt""" )
__SCREAMING_SNAKE_CASE = """"""
with open(a__ ) as f:
__SCREAMING_SNAKE_CASE = f.readline()
__SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
__SCREAMING_SNAKE_CASE = [
word
for word in [sum(ord(a__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(a__ )
if __name__ == "__main__":
print(solution())
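# Worked example (standard A=1..Z=26 letter values): "SKY" -> 19 + 11 + 25 = 55,
# and 55 = 10 * (10 + 1) / 2 is the 10th triangular number, so "SKY" counts.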
| 331
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ )
__SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase )
    # total number of characters, used as the probability denominator.
__SCREAMING_SNAKE_CASE = sum(single_char_strings.values() )
    # entropy accumulator for one-character strings
__SCREAMING_SNAKE_CASE = 0
    # for each alphabet character, if it appears in the text, add its entropy term
for ch in my_alphas:
if ch in single_char_strings:
__SCREAMING_SNAKE_CASE = single_char_strings[ch]
__SCREAMING_SNAKE_CASE = my_str / all_sum
my_fir_sum += prob * math.loga(a__ ) # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
    # entropy accumulator for two-character strings
__SCREAMING_SNAKE_CASE = sum(two_char_strings.values() )
__SCREAMING_SNAKE_CASE = 0
    # for each two-character sequence, accumulate its entropy term.
for cha in my_alphas:
for cha in my_alphas:
__SCREAMING_SNAKE_CASE = cha + cha
if sequence in two_char_strings:
__SCREAMING_SNAKE_CASE = two_char_strings[sequence]
__SCREAMING_SNAKE_CASE = int(a__ ) / all_sum
my_sec_sum += prob * math.loga(a__ )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def a__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
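# A minimal self-contained sketch of the single-character entropy computed above
# (assumption: the usual Shannon definition H = -sum(p * log2(p)); `text` must be
# non-empty):
def _shannon_entropy_sketch(text: str) -> float:
    counts = Counter(text)
    total = len(text)
    return -sum((n / total) * math.log2(n / total) for n in counts.values())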
| 331
| 1
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str ,_a : int ,_a : Union[str, Any]=7 ,_a : Union[str, Any]=3 ,_a : Dict=30 ,_a : Union[str, Any]=400 ,_a : str=True ,_a : Tuple=None ,_a : List[str]=True ,_a : Optional[int]=[0.5, 0.5, 0.5] ,_a : Union[str, Any]=[0.5, 0.5, 0.5] ,_a : Optional[int]=True ,_a : Optional[int]=1 / 255 ,_a : int=True ,):
'''simple docstring'''
_a : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_a : List[str] = parent
_a : Any = batch_size
_a : str = num_channels
_a : Optional[Any] = min_resolution
_a : Any = max_resolution
_a : Dict = do_resize
_a : int = size
_a : List[Any] = do_normalize
_a : Union[str, Any] = image_mean
_a : Union[str, Any] = image_std
_a : Union[str, Any] = do_rescale
_a : Tuple = rescale_factor
_a : Any = do_pad
def __lowercase ( self : Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowercase ( self : Any ,_a : Optional[Any] ,_a : Dict=False ):
'''simple docstring'''
if not batched:
_a : Optional[Any] = image_inputs[0]
if isinstance(_a ,Image.Image ):
_a, _a : List[str] = image.size
else:
_a, _a : List[Any] = image.shape[1], image.shape[2]
if w < h:
_a : List[Any] = int(self.size['shortest_edge'] * h / w )
_a : Any = self.size['shortest_edge']
elif w > h:
_a : Dict = self.size['shortest_edge']
_a : str = int(self.size['shortest_edge'] * w / h )
else:
_a : str = self.size['shortest_edge']
_a : Optional[int] = self.size['shortest_edge']
else:
_a : Optional[int] = []
for image in image_inputs:
_a, _a : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_a : int = max(_a ,key=lambda _a : item[0] )[0]
_a : Dict = max(_a ,key=lambda _a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ConditionalDetrImageProcessor if is_vision_available() else None
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = ConditionalDetrImageProcessingTester(self )
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,'image_mean' ) )
self.assertTrue(hasattr(_a ,'image_std' ) )
self.assertTrue(hasattr(_a ,'do_normalize' ) )
self.assertTrue(hasattr(_a ,'do_resize' ) )
self.assertTrue(hasattr(_a ,'size' ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,_a )
_a : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=_a )
self.assertEqual(image_processor.size ,{'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
_a : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_a, _a : List[str] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_a, _a : str = self.image_processor_tester.get_expected_values(_a ,batched=_a )
_a : Optional[Any] = image_processing(_a ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
_a : Any = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_a, _a : Optional[Any] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_a : List[Any] = image_processing(_a ,return_tensors='pt' ).pixel_values
_a, _a : List[Any] = self.image_processor_tester.get_expected_values(_a ,batched=_a )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
_a : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_a, _a : str = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_a : Tuple = image_processing(_a ,return_tensors='pt' ).pixel_values
_a, _a : Optional[int] = self.image_processor_tester.get_expected_values(_a ,batched=_a )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_a : List[str] = json.loads(f.read() )
_a : Optional[Any] = {'image_id': 3_9769, 'annotations': target}
# encode them
_a : Optional[int] = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
_a : str = image_processing(images=_a ,annotations=_a ,return_tensors='pt' )
# verify pixel values
_a : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,_a )
_a : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,_a ,atol=1E-4 ) )
# verify area
_a : Dict = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,_a ) )
# verify boxes
_a : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,_a )
_a : Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,_a ,atol=1E-3 ) )
# verify image_id
_a : str = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,_a ) )
# verify is_crowd
_a : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,_a ) )
# verify class_labels
_a : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,_a ) )
# verify orig_size
_a : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,_a ) )
# verify size
_a : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,_a ) )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_a : str = json.loads(f.read() )
_a : str = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_a : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_a : int = ConditionalDetrImageProcessor(format='coco_panoptic' )
_a : int = image_processing(images=_a ,annotations=_a ,masks_path=_a ,return_tensors='pt' )
# verify pixel values
_a : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,_a )
_a : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,_a ,atol=1E-4 ) )
# verify area
_a : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,_a ) )
# verify boxes
_a : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,_a )
_a : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,_a ,atol=1E-3 ) )
# verify image_id
_a : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,_a ) )
# verify is_crowd
_a : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,_a ) )
# verify class_labels
_a : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,_a ) )
# verify masks
_a : Tuple = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,_a )
# verify orig_size
_a : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,_a ) )
# verify size
_a : Dict = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,_a ) )
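# A minimal usage sketch outside the test harness (assumption: transformers' public
# image-processor API, with the checkpoint already used in the slow tests above):
#
#   processor = ConditionalDetrImageProcessor.from_pretrained(
#       "microsoft/conditional-detr-resnet-50"
#   )
#   encoding = processor(images=image, return_tensors="pt")
#   encoding["pixel_values"].shape  # (1, 3, height, width)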
| 271
|
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[int] = []
_a : int = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : Any = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Tuple = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : Dict = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : Dict = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
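# Worked example (monotonic-stack version above): next_greatest_element([2, 1, 3])
# returns [3, 3, -1] -- 3 is the next greater element for both 2 and 1, and the
# last element has none.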
| 271
| 1
|
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
"""simple docstring"""
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[str] = max_length
lowerCAmelCase : Union[str, Any] = max_position_embeddings
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = input_ids.shape[-1]
lowerCAmelCase : List[str] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , snake_case__ , )
lowerCAmelCase : Any = start_length
lowerCAmelCase : Union[str, Any] = max_new_tokens
lowerCAmelCase : Union[str, Any] = start_length + max_new_tokens
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Dict = max_time
lowerCAmelCase : List[Any] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
"""simple docstring"""
return any(criteria(snake_case__ , snake_case__ ) for criteria in self )
@property
def lowercase__ ( self ):
"""simple docstring"""
for stopping_criterium in self:
if isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
elif isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
return None
def a__ ( SCREAMING_SNAKE_CASE : StoppingCriteriaList , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Tuple = stopping_criteria.max_length
lowerCAmelCase : Optional[int] = deepcopy(SCREAMING_SNAKE_CASE )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , SCREAMING_SNAKE_CASE )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE ) )
return new_stopping_criteria
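# A minimal usage sketch (assumption: the un-obfuscated transformers names
# StoppingCriteriaList, MaxLengthCriteria and MaxTimeCriteria defined above):
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=5.0)]
#   )
#   criteria(input_ids, scores)  # True once any criterion fires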
| 133
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : List[Any] = tmp_path / "cache"
lowerCAmelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : str = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tmp_path / "cache"
lowerCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase : List[str] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : List[Any] = tmp_path / "cache"
lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Tuple = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Dict = [parquet_path]
lowerCAmelCase : Optional[Any] = tmp_path / "cache"
lowerCAmelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
lowerCAmelCase : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Tuple = tmp_path / "cache"
lowerCAmelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : Optional[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Any = tmp_path / "cache"
lowerCAmelCase : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : List[Any] = features.copy() if features else default_expected_features
lowerCAmelCase : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if split:
lowerCAmelCase : List[str] = {split: parquet_path}
else:
lowerCAmelCase : List[str] = "train"
lowerCAmelCase : str = {"train": parquet_path, "test": parquet_path}
lowerCAmelCase : Optional[int] = tmp_path / "cache"
lowerCAmelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCAmelCase : List[str] = pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCAmelCase : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : str = str(shared_datadir / "test_image_rgb.jpg" )
lowerCAmelCase : List[str] = {"image": [image_path]}
lowerCAmelCase : str = Features({"image": Image()} )
lowerCAmelCase : Optional[Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCAmelCase : Any = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase : Optional[int] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
assert get_writer_batch_size(SCREAMING_SNAKE_CASE ) == expected
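# A minimal round-trip sketch, illustrative of what the tests above exercise.
# Assumes the `datasets` library is installed; "example.parquet" is a scratch path.
from datasets import Dataset
ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3]})
ds.to_parquet("example.parquet")  # write through the Parquet writer
reloaded = Dataset.from_parquet("example.parquet")  # read back through the Parquet reader
assert reloaded.column_names == ds.column_names
assert reloaded.num_rows == ds.num_rows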
| 133
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : str = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A ( __snake_case ):
__magic_name__ = '''gpt_neo'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
A : Union[str, Any] = vocab_size
A : Optional[Any] = max_position_embeddings
A : Dict = hidden_size
A : Optional[Any] = num_layers
A : Tuple = num_heads
A : int = intermediate_size
A : Optional[Any] = window_size
A : List[Any] = activation_function
A : Union[str, Any] = resid_dropout
A : Any = embed_dropout
A : List[Any] = attention_dropout
A : str = classifier_dropout
A : List[Any] = layer_norm_epsilon
A : str = initializer_range
A : List[str] = use_cache
A : Optional[int] = bos_token_id
A : List[Any] = eos_token_id
A : int = attention_types
A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
A : Tuple = input.size()
A : Union[str, Any] = len(snake_case__ )
A : List[str] = shape[dimension]
A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ )
A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1
A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None]
A : str = [slice(snake_case__ )] * rank
A : List[Any] = indices
A : Union[str, Any] = input[s]
A : List[str] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
A : List[str] = torch.arange(1 , snake_case__ )
A : Optional[int] = torch.remainder(snake_case__ , snake_case__ )
A : Optional[int] = remainders == 0
A : Optional[Any] = candidates[divisor_indices]
A : Optional[int] = torch.max(snake_case__ )
return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' )
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' )
A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A : Dict = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self._config.num_heads
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
"""simple docstring"""
A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A : str = seqlen + 2
A : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : Any = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
A : str = common_inputs['''attention_mask''']
if self.use_past:
A : Optional[int] = ordered_inputs['''attention_mask'''].dtype
A : List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return 13
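# A small sanity check, illustrative only: the custom unfold helper above mirrors
# torch.Tensor.unfold, which cuts sliding windows of length `size` every `step` elements.
import torch
x = torch.arange(10).view(1, 10)
windows = x.unfold(1, 4, 4)  # dimension=1, size=4, step=4
print(windows.shape)  # torch.Size([1, 2, 4]): two non-overlapping windows of length 4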
| 3
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[Any] = ['pixel_values']
def __init__( self ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = 1 / 2_55 ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = size if size is not None else {'shortest_edge': 2_56}
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = offset
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(__UpperCAmelCase ,size['shortest_edge'] ,default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
A__ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase ,size=(size['height'], size['width']) ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> Optional[Any]:
        A__ = image.astype(np.float32 )
if offset:
A__ = image - (scale / 2)
return rescale(__UpperCAmelCase ,scale=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
return normalize(__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,) -> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
A__ = to_numpy_array(__UpperCAmelCase )
if do_resize:
A__ = self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase )
if do_center_crop:
A__ = self.center_crop(__UpperCAmelCase ,size=__UpperCAmelCase )
if do_rescale:
A__ = self.rescale(image=__UpperCAmelCase ,scale=__UpperCAmelCase ,offset=__UpperCAmelCase )
if do_normalize:
A__ = self.normalize(image=__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase )
A__ = to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase )
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = offset if offset is not None else self.offset
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
A__ = make_batched(__UpperCAmelCase )
A__ = [
[
self._preprocess_image(
image=__UpperCAmelCase ,do_resize=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,do_center_crop=__UpperCAmelCase ,crop_size=__UpperCAmelCase ,do_rescale=__UpperCAmelCase ,rescale_factor=__UpperCAmelCase ,offset=__UpperCAmelCase ,do_normalize=__UpperCAmelCase ,image_mean=__UpperCAmelCase ,image_std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,)
for img in video
]
for video in videos
]
A__ = {'pixel_values': videos}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
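# A standalone sketch of the batching rule implemented by the helper above,
# reimplemented here because the original names are mangled; illustrative only.
import numpy as np
def _make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos  # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]  # a single video given as a list of frames
    return [[videos]]  # a single frame
frame = np.zeros((4, 4, 3), dtype=np.uint8)
assert len(_make_batched(frame)) == 1 and len(_make_batched(frame)[0]) == 1
assert _make_batched([frame, frame])[0][1] is frame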
| 221
| 0
|
import os
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = "matrix.txt" )-> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) as in_file:
snake_case_ = in_file.read()
snake_case_ = [[int(SCREAMING_SNAKE_CASE ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
snake_case_ = [[0 for cell in row] for row in grid]
snake_case_ = len(grid[0] )
snake_case_ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
snake_case_ = grid[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
snake_case_ = grid[0][i] + dp[0][i - 1]
for i in range(1 , SCREAMING_SNAKE_CASE ):
snake_case_ = grid[i][0] + dp[i - 1][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(1 , SCREAMING_SNAKE_CASE ):
snake_case_ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
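# A tiny worked example, illustrative only: the same right/down DP on a 2x2 grid.
# The cheapest path 1 -> 2 -> 1 costs 4.
def _min_path_sum(grid):
    rows, cols = len(grid), len(grid[0])
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
assert _min_path_sum([[1, 3], [2, 1]]) == 4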
| 267
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Tuple:
"""simple docstring"""
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> List[Any]:
"""simple docstring"""
for char in word:
snake_case_ = ord(SCREAMING_SNAKE_CASE )
if not _is_chinese_char(SCREAMING_SNAKE_CASE ):
return 0
return 1
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = set()
for token in tokens:
snake_case_ = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE )
snake_case_ = list(SCREAMING_SNAKE_CASE )
return word_list
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> List[str]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(SCREAMING_SNAKE_CASE )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ):
snake_case_ = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = '''##''' + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Any:
"""simple docstring"""
snake_case_ = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws
snake_case_ = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
snake_case_ = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
snake_case_ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
snake_case_ = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE )
input_tokens.append(SCREAMING_SNAKE_CASE )
snake_case_ = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case_ = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
snake_case_ = token[2:]
                # save Chinese tokens' positions
if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ):
ref_id.append(SCREAMING_SNAKE_CASE )
ref_ids.append(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
return ref_ids
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Tuple:
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    snake_case_ = LTP(args.ltp ) # faster on a GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ = [json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
UpperCAmelCase = parser.parse_args()
main(args)
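# A standalone sketch of the "##" merging idea used above, reimplemented here
# because the original function names are mangled; illustrative only.
def _mark_subwords(chars, word_set, max_word_len=2):
    out, start = list(chars), 0
    while start < len(out):
        matched = False
        for i in range(min(max_word_len, len(out) - start), 1, -1):
            if "".join(out[start : start + i]) in word_set:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out
assert _mark_subwords(["中", "国", "人"], {"中国"}) == ["中", "##国", "人"]  # "国" joins "中国"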
| 267
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Union[str, Any] ="nllb-moe"
lowerCamelCase : int =["past_key_values"]
lowerCamelCase : str ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[Any] , a : Optional[int]=12_81_12 , a : Optional[Any]=10_24 , a : Optional[int]=12 , a : List[Any]=40_96 , a : Optional[int]=16 , a : Dict=12 , a : Optional[Any]=40_96 , a : List[str]=16 , a : Union[str, Any]=0.05 , a : Any=0.05 , a : Tuple=True , a : Any=True , a : Optional[int]="relu" , a : List[str]=10_24 , a : Any=0.1 , a : Any=0.1 , a : str=0.0 , a : Dict=0.02 , a : int=2 , a : Optional[Any]=True , a : Tuple=False , a : int="float32" , a : Tuple=False , a : Tuple=1_28 , a : List[Any]=64 , a : Dict=4 , a : Any=4 , a : Optional[int]=0.0_01 , a : Union[str, Any]=0.0_01 , a : List[str]="all" , a : Any=False , a : Any=False , a : List[Any]=1.0 , a : Dict=0.2 , a : Any=1 , a : List[str]=0 , a : int=2 , a : Dict=False , **a : List[str] , ):
"""simple docstring"""
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = d_model
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = router_z_loss_coef
__lowerCamelCase = router_aux_loss_coef
__lowerCamelCase = decoder_sparse_step
__lowerCamelCase = encoder_sparse_step
__lowerCamelCase = num_experts
__lowerCamelCase = expert_capacity
__lowerCamelCase = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
__lowerCamelCase = router_dtype
__lowerCamelCase = router_ignore_padding_tokens
__lowerCamelCase = batch_prioritized_routing
__lowerCamelCase = second_expert_policy
__lowerCamelCase = normalize_router_prob_before_dropping
__lowerCamelCase = moe_eval_capacity_token_fraction
__lowerCamelCase = moe_token_dropout
__lowerCamelCase = output_router_logits
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
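# A minimal sketch, assuming a transformers release that ships NllbMoeConfig.
# The router_dtype check above rejects anything outside float32/float16/bfloat16.
from transformers import NllbMoeConfig
config = NllbMoeConfig(num_experts=8, router_dtype="float16")
print(config.num_experts)  # 8
try:
    NllbMoeConfig(router_dtype="float64")  # not an allowed value
except ValueError as err:
    print(err)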
| 67
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __A :
@staticmethod
def lowercase__ ( *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
pass
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
    lowerCAmelCase : List[Any] = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Tuple = np.array(_UpperCAmelCase )
lowerCAmelCase : Dict = npimg.shape
return {"hash": hashimage(_UpperCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Dict = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase_ : Any = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : List[str] = MaskGenerationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def lowercase__ ( self : Dict ):
pass
@slow
@require_torch
def lowercase__ ( self : str ):
lowerCAmelCase : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
lowerCAmelCase : Union[str, Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
lowerCAmelCase : List[str] = []
for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(UpperCAmelCase_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = 'facebook/sam-vit-huge'
lowerCAmelCase : str = pipeline('mask-generation' , model=UpperCAmelCase_ )
lowerCAmelCase : int = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
lowerCAmelCase : Optional[int] = []
for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(UpperCAmelCase_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
] , )
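# A hedged usage sketch: this downloads a large SAM checkpoint, and the model id
# here is an example rather than the one pinned in the tests above.
from transformers import pipeline
segmenter = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64)
print(len(outputs["masks"]), "masks, first score:", outputs["scores"][0])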
| 138
| 0
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return (-y * np.log(UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = np.dot(UpperCamelCase , UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(UpperCamelCase ) ) )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=70000 ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(UpperCamelCase ):
lowerCAmelCase__ : str = np.dot(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = sigmoid_function(UpperCamelCase )
lowerCAmelCase__ : List[str] = np.dot(x.T , h - y ) / y.size
lowerCAmelCase__ : List[Any] = theta - alpha * gradient # updating the weights
lowerCAmelCase__ : List[Any] = np.dot(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[Any] = sigmoid_function(UpperCamelCase )
lowerCAmelCase__ : List[Any] = cost_function(UpperCamelCase , UpperCamelCase )
if iterations % 100 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
_lowerCAmelCase = datasets.load_iris()
_lowerCAmelCase = iris.data[:, :2]
_lowerCAmelCase = (iris.target != 0) * 1
_lowerCAmelCase = 0.1
_lowerCAmelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return sigmoid_function(
np.dot(UpperCamelCase , UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 0].min(), x[:, 0].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 1].min(), x[:, 1].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCAmelCase = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCAmelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
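# A quick numeric check, illustrative only: one gradient step on a toy dataset
# using the same update rule as logistic_reg above.
import numpy as np
x_toy = np.array([[0.0, 1.0], [1.0, 0.0]])
y_toy = np.array([0.0, 1.0])
theta = np.zeros(2)
h = 1 / (1 + np.exp(-x_toy.dot(theta)))  # sigmoid of the linear scores
theta = theta - 0.1 * x_toy.T.dot(h - y_toy) / y_toy.size
print(theta)  # [ 0.025 -0.025]: the weights move toward separating the two classes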
| 184
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_( enum.Enum ):
'''simple docstring'''
__lowercase : Any = 0
__lowercase : List[str] = 1
__lowercase : List[Any] = 2
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : int = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase__ : List[Any] = None
if self.model.config.prefix is not None:
lowerCAmelCase__ : Optional[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase__ : Optional[int] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._sanitize_parameters(prefix=__UpperCAmelCase ,**self._forward_params )
lowerCAmelCase__ : int = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase__ : Optional[Any] = {**self._forward_params, **forward_params}
def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Dict:
lowerCAmelCase__ : List[Any] = {}
if prefix is not None:
lowerCAmelCase__ : Any = prefix
if prefix:
lowerCAmelCase__ : Optional[Any] = self.tokenizer(
__UpperCAmelCase ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework )
lowerCAmelCase__ : int = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
""" [None, 'hole']""" )
lowerCAmelCase__ : int = handle_long_generation
preprocess_params.update(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = generate_kwargs
lowerCAmelCase__ : Union[str, Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
lowerCAmelCase__ : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
lowerCAmelCase__ : int = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase__ : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase__ : Union[str, Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase__ : Tuple = self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCAmelCase__ : Tuple = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*__UpperCAmelCase ,**__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase="" ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[Any] = self.tokenizer(
prefix + prompt_text ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework )
lowerCAmelCase__ : Optional[int] = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase__ : Tuple = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase__ : str = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase__ : List[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase__ : List[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        """We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"""
                        """ model's max length""" )
lowerCAmelCase__ : str = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase__ : Union[str, Any] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Tuple = model_inputs["""input_ids"""]
lowerCAmelCase__ : Dict = model_inputs.get("""attention_mask""" ,__UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : int = 1
else:
lowerCAmelCase__ : str = input_ids.shape[0]
lowerCAmelCase__ : List[Any] = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase__ : Union[str, Any] = generate_kwargs.pop("""prefix_length""" ,0 )
if prefix_length > 0:
lowerCAmelCase__ : Union[str, Any] = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase__ : Tuple = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase__ : str = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase__ : Optional[int] = self.model.generate(input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : str = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase__ : Dict = generated_sequence.reshape(__UpperCAmelCase ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
lowerCAmelCase__ : Any = tf.reshape(__UpperCAmelCase ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=ReturnType.FULL_TEXT ,__UpperCAmelCase=True ) -> str:
lowerCAmelCase__ : int = model_outputs["""generated_sequence"""][0]
lowerCAmelCase__ : Dict = model_outputs["""input_ids"""]
lowerCAmelCase__ : Tuple = model_outputs["""prompt_text"""]
lowerCAmelCase__ : Optional[Any] = generated_sequence.numpy().tolist()
lowerCAmelCase__ : List[str] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase__ : List[str] = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase__ : Dict = self.tokenizer.decode(
__UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase__ : Optional[int] = 0
else:
lowerCAmelCase__ : str = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) )
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase__ : Optional[int] = prompt_text + text[prompt_length:]
else:
lowerCAmelCase__ : List[str] = text[prompt_length:]
lowerCAmelCase__ : List[Any] = {"""generated_text""": all_text}
records.append(__UpperCAmelCase )
return records
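# A minimal usage sketch; the model id is an example, any causal LM checkpoint works.
from transformers import pipeline
generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
result = generator("Hello world", max_new_tokens=5)
print(result[0]["generated_text"])  # the prompt plus up to 5 freshly generated tokens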
| 184
| 1
|
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''bert-generation'''
def __init__( self , lowercase=5_0_3_5_8 , lowercase=1_0_2_4 , lowercase=2_4 , lowercase=1_6 , lowercase=4_0_9_6 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase=2 , lowercase=1 , lowercase="absolute" , lowercase=True , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : Any = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Optional[Any] = hidden_act
A_ : Optional[int] = intermediate_size
A_ : Dict = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Any = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Tuple = position_embedding_type
A_ : Tuple = use_cache
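# A minimal instantiation sketch, assuming transformers ships BertGenerationConfig.
from transformers import BertGenerationConfig
config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)
print(config.model_type)  # "bert-generation"
print(config.hidden_size)  # 512, overriding the 1024 default above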
| 140
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCamelCase ( __lowercase : dict ,__lowercase : str ,__lowercase : set ,__lowercase : set ,__lowercase : dict ,__lowercase : dict ,__lowercase : PriorityQueue ,__lowercase : dict ,__lowercase : float | int ,):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ : List[str] = cst_fwd.get(__lowercase ,np.inf )
A_ : Any = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ : Any = new_cost_f
A_ : Optional[int] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ : str = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCamelCase ( __lowercase : str ,__lowercase : str ,__lowercase : dict ,__lowercase : dict ):
'''simple docstring'''
A_ : List[str] = -1
A_ : List[Any] = set()
A_ : Union[str, Any] = set()
A_ : int = {source: 0}
A_ : List[Any] = {destination: 0}
A_ : Dict = {source: None}
A_ : Optional[int] = {destination: None}
A_ : PriorityQueue[Any] = PriorityQueue()
A_ : PriorityQueue[Any] = PriorityQueue()
A_ : Tuple = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ : List[str] = queue_forward.get()
visited_forward.add(__lowercase )
A_ , A_ : Union[str, Any] = queue_backward.get()
visited_backward.add(__lowercase )
A_ : int = pass_and_relaxation(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,)
A_ : str = pass_and_relaxation(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ : int = shortest_distance
return shortest_path_distance
_UpperCAmelCase = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
_UpperCAmelCase = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
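# A small illustration, separate from the algorithm above: PriorityQueue pops the
# lowest (cost, node) pair first, which is what drives the expansion order.
from queue import PriorityQueue
pq = PriorityQueue()
for item in [(3, "C"), (1, "A"), (2, "B")]:
    pq.put(item)
print(pq.get())  # (1, 'A')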
| 140
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :str = logging.get_logger(__name__)
__snake_case :List[str] = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = '''distilbert'''
UpperCamelCase__ : Any = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=30_522 , __SCREAMING_SNAKE_CASE : int=512 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Any=6 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Dict=768 , __SCREAMING_SNAKE_CASE : List[Any]=4 * 768 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.2 , __SCREAMING_SNAKE_CASE : str=0 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__a = vocab_size
__a = max_position_embeddings
__a = sinusoidal_pos_embds
__a = n_layers
__a = n_heads
__a = dim
__a = hidden_dim
__a = dropout
__a = attention_dropout
__a = activation
__a = initializer_range
__a = qa_dropout
__a = seq_classif_dropout
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_)
class _A ( __UpperCAmelCase ):
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 361
|
def __snake_case ( _UpperCAmelCase = 1000000 ):
__a = limit + 1
__a = [0] * limit
for first_term in range(1 , _UpperCAmelCase ):
for n in range(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
__a = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f'{solution() = }')
| 131
| 0
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def _UpperCamelCase ( snake_case__ ) -> List[str]:
return 1.0 / (1.0 + np.exp(-_outputs ))
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
__UpperCAmelCase : List[str] = np.max(_outputs, axis=-1, keepdims=snake_case__ )
__UpperCAmelCase : Dict = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=snake_case__ )
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[Any] = "sigmoid"
lowerCamelCase__: Dict = "softmax"
lowerCamelCase__: Optional[int] = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
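# Usage sketch (added for illustration; the checkpoint name is an assumption --
# any sequence-classification model would do):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification",
#                         model="distilbert-base-uncased-finetuned-sst-2-english")
#   classifier("This is great!")              # [{'label': 'POSITIVE', 'score': 0.99...}]
#   classifier("This is great!", top_k=None)  # scores for every label, sorted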
| 157
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
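# Worked example (added for clarity): 220 and 284 form the classic amicable pair:
# sum_of_divisors(220) = 1+2+4+5+10+11+20+22+44+55+110 = 284 and
# sum_of_divisors(284) = 1+2+4+71+142 = 220, so both are counted by solution().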
| 157
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 353
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 21
| 0
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    '''simple docstring'''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
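# Example (added for clarity):
#   iter_merge_sort([9, 3, 7, 1])  ->  [1, 3, 7, 9]
# The pass size p doubles each sweep (2, 4, 8, ...), merging adjacent runs in place.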
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__a = []
else:
__a = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 30
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 4
| 0
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
| 371
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 334
| 0
|
'''simple docstring'''
import requests
lowerCamelCase = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 166
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCamelCase = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCamelCase = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCamelCase = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        '''simple docstring'''
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 166
| 1
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    """simple docstring"""

    def __init__(self) -> None:
        """simple docstring"""
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        """simple docstring"""
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
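# Usage sketch (added for illustration): the watermarker expects a batch of image
# tensors scaled to [-1, 1] with shape (batch, channels, height, width); images
# narrower than 256 px are returned unchanged.
#
#   images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)
#   watermarked = StableDiffusionXLWatermarker().apply_watermark(images)
#   assert watermarked.shape == images.shape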
| 266
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 266
| 1
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
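# Usage note (added; the script name is a placeholder for this module's filename):
#
#   python lempel_ziv_decompress.py compressed.bin restored.bin
#
# Despite its name, `compress` above is the decompression entry point: it reads
# the packed bit string, strips the length prefix, and rebuilds the LZW lexicon.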
| 195
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    '''simple docstring'''

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(inputs_args), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 195
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 360
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=50, generator=None, output_type="pil", return_dict=True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
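# Usage sketch (added for illustration; the checkpoint name is an assumption --
# any repo exposing a compatible UNet2DModel + KarrasVeScheduler would work):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")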
| 268
| 0
|
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "data2vec-audio"

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1_500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return math.prod(self.conv_stride)
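# Worked example (added for clarity): with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio = 5 * 2**6 = 320, i.e. the
# feature encoder emits one frame per 320 raw audio samples (20 ms at 16 kHz).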
| 108
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        """simple docstring"""
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        """simple docstring"""
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        """simple docstring"""
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        """simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        """simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase,  # the annotated dict bound just above
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 108
| 1
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
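# `_import_structure` maps each submodule to the public names it exposes; the
# `_LazyModule` bound at the bottom of this file uses it to defer the heavy
# framework imports until an attribute is actually accessed.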
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
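# e.g. check_polygon([3, 4, 5]) -> True (a valid triangle), while
# check_polygon([1, 1, 3]) -> False (the longest side exceeds the sum of the rest).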
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
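# The hypothesis here is linear: h(x) = p[0] + p[1] * x[0] + p[2] * x[1] + p[3] * x[2],
# where p is `parameter_vector`; gradient descent below tunes p to fit `train_data`.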
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_="train" ) -> Optional[int]:
return calculate_hypothesis_value(UpperCamelCase_ , UpperCamelCase_ ) - output(
UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = 0
for i in range(len(UpperCamelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=m ) -> Dict:
UpperCamelCase_ = 0
for i in range(UpperCamelCase_ ):
if index == -1:
summation_value += _error(UpperCamelCase_ )
else:
summation_value += _error(UpperCamelCase_ ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = summation_of_cost_derivative(UpperCamelCase_ , UpperCamelCase_ ) / m
return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
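# Pegasus reserves ids 0..offset-1 (offset defaults to 103) for special tokens
# (<pad>, </s>, <mask_1>, <mask_2>, <unk_2>..<unk_102>), so raw SentencePiece
# ids are shifted up by `offset` in the conversion methods below.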
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS."""
        return 1
    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
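# `partition(n)` below encodes each way of writing n as a sum of primes by the
# product of those primes (unique by prime factorization), so
# len(partition(n)) counts the distinct prime partitions of n.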
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot cross entropy; `reduction` is e.g. jnp.mean or None
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    train_data: str = "data/nq-training.jsonl"
    val_data: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
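# `jax.pmap(..., axis_name="batch")` runs one replica of each step per device;
# `jax.lax.pmean` then averages the loss and gradients across replicas, so all
# devices apply an identical update (plain data-parallel training).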
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
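# With the default limit of 4,000,000 (Project Euler #2) the even-valued
# Fibonacci terms sum to 4613732, so `solution()` returns 4613732.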
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
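# A minimal usage sketch (the subclass and values below are illustrative only):
#
#     class MyNamer(TrialShortNamer):
#         DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#
#     MyNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})  # -> "hp_lr0.0001"
#     MyNamer.parse_repr("hp_lr0.0001")  # -> {"learning_rate": 0.0001, "batch_size": 8}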
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
__lowerCamelCase : Optional[int] = """Input must be a string of 8 numbers plus letter"""
__lowerCamelCase : Any = """TRWAGMYFPDXBNJZSQVHLCKE"""
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
if not isinstance(snake_case_ , snake_case_ ):
snake_case__ : str = F'''Expected string as input, found {type(snake_case_ ).__name__}'''
raise TypeError(snake_case_ )
snake_case__ : Tuple = spanish_id.replace("-" , "" ).upper()
if len(snake_case_ ) != 9:
raise ValueError(snake_case_ )
try:
snake_case__ : Union[str, Any] = int(spanish_id_clean[0:8] )
snake_case__ : Tuple = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case_ ) from ex
if letter.isdigit():
raise ValueError(snake_case_ )
return letter == LOOKUP_LETTERS[number % 23]
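# Example: is_spain_national_id("12345678Z") -> True, since 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == "Z".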
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=5) -> Dict:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowercase : List[Any] = self.model.config.num_labels
if self.framework == "pt":
_lowercase : int = model_outputs.logits.softmax(-1)[0]
_lowercase , _lowercase : Union[str, Any] = probs.topk(lowerCamelCase)
elif self.framework == "tf":
_lowercase : int = stable_softmax(model_outputs.logits, axis=-1)[0]
_lowercase : List[Any] = tf.math.top_k(lowerCamelCase, k=lowerCamelCase)
_lowercase , _lowercase : Any = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''')
_lowercase : str = scores.tolist()
_lowercase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase, lowerCamelCase)]
| 21
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    """Returns the name of the first available hyperparameter search backend."""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
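# A minimal usage sketch (assumes at least one backend, e.g. optuna, is installed):
#
#     backend = default_hp_search_backend()  # e.g. "optuna"
#     # A Trainer built with a `model_init` can then delegate to that backend:
#     # best_run = trainer.hyperparameter_search(n_trials=10, direction="minimize", backend=backend)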
| 371
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__UpperCAmelCase :Tuple = "Create a default config file for Accelerate with only a few flags set."
def _a ( _lowercase : List[Any]="no" , _lowercase : str = default_json_config_file , _lowercase : bool = False ):
'''simple docstring'''
__UpperCAmelCase : Dict = Path(_lowercase )
path.parent.mkdir(parents=_lowercase , exist_ok=_lowercase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase : List[str] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase : int = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase : Optional[Any] = torch.cuda.device_count()
__UpperCAmelCase : List[str] = num_gpus
__UpperCAmelCase : int = False
if num_gpus > 1:
__UpperCAmelCase : Any = '''MULTI_GPU'''
else:
__UpperCAmelCase : int = '''NO'''
elif is_xpu_available() and use_xpu:
__UpperCAmelCase : List[Any] = torch.xpu.device_count()
__UpperCAmelCase : List[Any] = num_xpus
__UpperCAmelCase : Optional[int] = False
if num_xpus > 1:
__UpperCAmelCase : Any = '''MULTI_XPU'''
else:
__UpperCAmelCase : Optional[Any] = '''NO'''
elif is_npu_available():
__UpperCAmelCase : Dict = torch.npu.device_count()
__UpperCAmelCase : Any = num_npus
__UpperCAmelCase : Any = False
if num_npus > 1:
__UpperCAmelCase : Dict = '''MULTI_NPU'''
else:
__UpperCAmelCase : Optional[int] = '''NO'''
else:
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Tuple = '''NO'''
__UpperCAmelCase : List[Any] = ClusterConfig(**_lowercase )
config.to_json_file(_lowercase )
return path
def _a ( _lowercase : Union[str, Any] , _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parser.add_parser('''default''' , parents=_lowercase , help=_lowercase , formatter_class=_lowercase )
parser.add_argument(
'''--config_file''' , default=_lowercase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=_lowercase , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=_lowercase )
return parser
def _a ( _lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
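# A minimal usage sketch: `write_basic_config` above also backs the public
# `accelerate.utils.write_basic_config` re-export and the `accelerate config default` CLI.
#
#     write_basic_config(mixed_precision="fp16")  # writes default_config.yaml under the HF cache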
| 240
| 0
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_A = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
_A = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
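# A minimal invocation sketch (paths are illustrative):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /t5x/checkpoint_1000000 \
#         --config_file config.json \
#         --pytorch_dump_path ./t5_pytorch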
| 231
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between two strings."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word's index overflows - delete all remaining letters of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows - delete all remaining letters of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
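# A few worked examples: "kitten" -> "sitting" takes 3 edits (substitute k->s,
# substitute e->i, insert g), and "intention" -> "execution" takes 5.
#
#     assert min_distance_up_bottom("kitten", "sitting") == 3
#     assert min_distance_up_bottom("intention", "execution") == 5
#     assert min_distance_up_bottom("", "abc") == 3  # pure insertions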
| 231
| 1
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
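# A minimal invocation sketch:
#
#     python release.py                 # pre-release: pick and write the release version
#     python release.py --patch         # pre-release for a patch version
#     python release.py --post_release  # post-release: move back to a .dev0 version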
| 90
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
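# A minimal usage sketch of the legacy dataset helpers re-exported above
# (assumes a tokenizer is already loaded and GLUE data is downloaded locally):
#
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     train_dataset = GlueDataset(args, tokenizer=tokenizer)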
| 90
| 1
|