import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
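
# A minimal usage sketch for the metrics above (illustrative values; assumes numpy
# arrays, since the helpers call `.mean()` on the elementwise comparison):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 1, 1, 0])
#   print(glue_compute_metrics("mrpc", preds, labels))
#   # -> {"acc": 0.5, "f1": ~0.67, "acc_and_f1": ~0.58}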
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-dimension square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively compute the product of two square power-of-two matrices
    using Strassen's seven-multiplication scheme."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maxim = int(math.pow(2, math.ceil(math.log2(max(*dimension1, *dimension2)))))

    # Copy the inputs before padding so the caller's matrices are not mutated.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays' dimensions are the same and
    # also a power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
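
# A quick sanity check one can run against `strassen` above: compare with a naive
# O(n^3) triple-loop product (`naive_multiply` is an illustrative helper, not part
# of the original module).
def naive_multiply(a: list, b: list) -> list:
    # classic row-by-column accumulation
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]


# e.g. strassen([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]]) and
# naive_multiply(...) on the same inputs both give [[58, 64], [139, 154]]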
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
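
# A rough usage sketch (shapes illustrative; `tokenizer` is assumed to be an
# XLM-RoBERTa tokenizer matching the checkpoint's vocabulary):
#
#   config = MCLIPConfig()
#   model = MultilingualCLIP(config).eval()
#   batch = tokenizer(["une photo d'un chat"], return_tensors="pt")
#   projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
#   # projected: (batch, numDims) mean-pooled sentence embedding in the image space
#   # token_embs: (batch, seq_len, transformerDimSize) per-token hidden states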
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    Wraps a learning rate scheduler so it only steps when the wrapped optimizer(s)
    actually performed a training step (e.g. it is skipped during gradient
    accumulation or when mixed-precision overflow skips an optimizer step).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough to the wrapped scheduler below
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
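
# A short usage sketch: in practice this wrapper is not constructed by hand but
# returned by `Accelerator.prepare` when a torch scheduler is passed in:
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   model, optimizer, dataloader, scheduler = accelerator.prepare(
#       model, optimizer, dataloader, scheduler
#   )
#   ...
#   scheduler.step()  # becomes a no-op on steps where the optimizer step was skipped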
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
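
# Example invocation (script name and output path are illustrative; the parent
# directory of the checkpoint selects the summarization dataset config, e.g. "aeslc"):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc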
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream tasks it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
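
# Example invocation (script name and dump folder are illustrative; the URL is the
# script's default ViT-MSN small checkpoint):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small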
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
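
# Example invocation (script name and output folder are illustrative):
#
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small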
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
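
# A minimal instantiation sketch (`BitModel` is the matching transformers model
# class; the argument values shown are just the supported options spelled out):
#
#   from transformers import BitConfig, BitModel
#   config = BitConfig(layer_type="preactivation", global_padding="SAME")
#   model = BitModel(config)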
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
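
# A small sketch of what the ONNX config above exposes (task name illustrative;
# assumes the base OnnxConfig constructor signature `(config, task=...)`):
#
#   onnx_config = IBertOnnxConfig(IBertConfig(), task="sequence-classification")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])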
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ) ->List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE : int = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE : Any = max_resolution
SCREAMING_SNAKE_CASE : List[str] = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE : Optional[Any] = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_pad
def __lowerCAmelCase ( self ) ->List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=False ) ->Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE : Optional[Any] = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE : str = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE : Any = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE : str = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE : Tuple = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE : Tuple = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE : str = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            SCREAMING_SNAKE_CASE : str = max(expected_values , key=lambda item : item[0] )[0]
            SCREAMING_SNAKE_CASE : List[str] = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
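# Worked example of the shortest-edge rule above (illustrative): with
# size["shortest_edge"] == 18 and an image of height 30, width 60 (w > h),
# the expected height is 18 and the expected width is int(18 * 60 / 30) == 36,
# preserving the aspect ratio.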
@require_torch
@require_vision
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = YolosImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) ->int:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
pass
def __lowerCAmelCase ( self ) ->Optional[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) ->Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) ->Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) ->Optional[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : int = self.image_processing_class(do_resize=_lowerCamelCase , do_normalize=_lowerCamelCase , do_rescale=_lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(_lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing_a(_lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ) ->Dict:
# prepare image and target
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE : Optional[Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE : List[Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
@slow
def __lowerCAmelCase ( self ) ->List[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE : Union[str, Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE : Optional[Any] = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE : int = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[Any] = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
| 313
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
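# Illustrative sketch (not in the original file; assumes the classes above mirror
# the upstream `ImageGPTConfig` / `ImageGPTOnnxConfig`): the attribute_map lets
# generic names resolve to the GPT-style fields defined in __init__.
# from transformers import ImageGPTConfig
# cfg = ImageGPTConfig()
# assert cfg.hidden_size == cfg.n_embd == 512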
| 313
| 1
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
a__ : List[Any] = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def UpperCAmelCase_( a__ ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
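# Illustrative consequence of the checks above: passing `--mlm` requires
# alpha_mlm > 0 and alpha_clm == 0, so with the parser defaults
# (alpha_mlm=0.0, alpha_clm=0.5) one must also pass e.g.
# `--alpha_mlm 2.0 --alpha_clm 0.0`.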
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE : Union[str, Any] = False
elif args.student_type == "gpt2":
SCREAMING_SNAKE_CASE : Optional[Any] = False
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE : Any = False
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=a__ , required=a__ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
    '''--data_file''' , type=a__ , required=a__ , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
    '''--student_type''' , type=a__ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=a__ , help='''The student type (DistilBERT, RoBERTa, GPT2).''' , )
parser.add_argument('''--student_config''' , type=a__ , required=a__ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=a__ , type=a__ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
    '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=a__ , help='''Teacher type (BERT, RoBERTa, GPT2).''' )
parser.add_argument('''--teacher_name''' , type=a__ , required=a__ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=a__ , help='''Temperature applied to the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=a__ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=a__ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=a__ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=a__ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=a__ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
    '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, MLM is used instead of CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=a__ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=a__ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=a__ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=a__ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=a__ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=a__ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
    '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=a__ , default=3 , help='''Number of passes over the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=a__ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a__ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=a__ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=a__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=a__ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=a__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=a__ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=a__ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=a__ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=a__ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=a__ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=a__ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=a__ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=a__ , default=4_000 , help='''Checkpoint interval.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
sanity_checks(a__ )
# ARGS #
init_gpu_params(a__ )
set_seed(a__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(a__ ) , a__ , indent=4 )
git_log(args.dump_path )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = MODEL_CLASSES[args.student_type]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
SCREAMING_SNAKE_CASE : int = tokenizer.all_special_tokens.index(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
SCREAMING_SNAKE_CASE : List[Any] = special_tok_ids
SCREAMING_SNAKE_CASE : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(a__ )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
SCREAMING_SNAKE_CASE : str = pickle.load(a__ )
SCREAMING_SNAKE_CASE : List[str] = np.maximum(a__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
SCREAMING_SNAKE_CASE : str = 0.0 # do not predict special tokens
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(a__ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = LmSeqsDataset(params=a__ , data=a__ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
SCREAMING_SNAKE_CASE : Any = student_config_class.from_pretrained(args.student_config )
SCREAMING_SNAKE_CASE : int = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
SCREAMING_SNAKE_CASE : Any = student_model_class.from_pretrained(args.student_pretrained_weights , config=a__ )
else:
SCREAMING_SNAKE_CASE : Tuple = student_model_class(a__ )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
SCREAMING_SNAKE_CASE : Union[str, Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=a__ )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(a__ , a__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(a__ , a__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = Distiller(
params=a__ , dataset=a__ , token_probs=a__ , student=a__ , teacher=a__ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
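# Illustrative invocation (paths are placeholders; flags follow the parser above):
# python train.py --force --dump_path ./dump \
#     --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
#     --student_type distilbert --student_config distilbert.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0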
| 313
|
from maths.prime_check import is_prime
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a__ )
if is_prime(a__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
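# Illustrative behavior of the twin-prime check above: for 5 (both 5 and 7 are
# prime) the function returns 7; for 4 (not prime) it returns -1.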
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
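    # Illustrative output of the method above (ids are symbolic): a single
    # sequence becomes [bos] + ids + [eos]; a pair becomes
    # [bos] + ids_a + [eos, eos] + ids_b + [eos].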
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
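# Worked example of the global-attention padding above (illustrative): with
# padding_side == "right", a global_attention_mask of [1, 0, 0] padded to
# length 5 becomes [1, 0, 0, -1, -1]; -1 marks padding because 0 already
# means "local attention" for LED.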
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 313
| 1
|
import string
import numpy
def greatest_common_divisor( a , b ):
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a , a )
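# Illustrative trace of the Euclidean recursion above:
# greatest_common_divisor(7, 36) -> greatest_common_divisor(1, 7)
#   -> greatest_common_divisor(0, 1) -> 1  (7 and 36 are coprime)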
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    __SCREAMING_SNAKE_CASE : Dict = numpy.vectorize(lambda x : x % 36 )
__SCREAMING_SNAKE_CASE : Any = numpy.vectorize(a__ )
def __init__( self , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : Optional[int] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
SCREAMING_SNAKE_CASE : str = encrypt_key.shape[0]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
return self.key_string.index(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
return self.key_string[round(_lowerCamelCase )]
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Optional[int] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
SCREAMING_SNAKE_CASE : Optional[int] = det % len(self.key_string )
SCREAMING_SNAKE_CASE : Dict = len(self.key_string )
if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1:
SCREAMING_SNAKE_CASE : List[str] = (
F"""determinant modular {req_l} of encryption key({det}) """
F"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : Any = [char for char in text.upper() if char in self.key_string]
SCREAMING_SNAKE_CASE : Union[str, Any] = chars[-1]
while len(_lowerCamelCase ) % self.break_key != 0:
            chars.append(chars[-1] )
return "".join(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : int = self.process_text(text.upper() )
SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
SCREAMING_SNAKE_CASE : Any = text[i : i + self.break_key]
SCREAMING_SNAKE_CASE : List[str] = [self.replace_letters(_lowerCamelCase ) for char in batch]
SCREAMING_SNAKE_CASE : int = numpy.array([vec] ).T
SCREAMING_SNAKE_CASE : str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[
0
]
SCREAMING_SNAKE_CASE : Tuple = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def __lowerCAmelCase ( self ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE : int = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
SCREAMING_SNAKE_CASE : Optional[Any] = det % len(self.key_string )
SCREAMING_SNAKE_CASE : Tuple = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
SCREAMING_SNAKE_CASE : str = i
break
SCREAMING_SNAKE_CASE : str = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_lowerCamelCase ) )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : str = self.make_decrypt_key()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_text(text.upper() )
SCREAMING_SNAKE_CASE : Tuple = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
SCREAMING_SNAKE_CASE : List[Any] = text[i : i + self.break_key]
SCREAMING_SNAKE_CASE : Tuple = [self.replace_letters(_lowerCamelCase ) for char in batch]
SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.array([vec] ).T
SCREAMING_SNAKE_CASE : Optional[Any] = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0]
SCREAMING_SNAKE_CASE : int = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
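# Illustrative usage (hedged; uses the `HillCipher` name as in main() below and
# assumes a 2x2 key whose determinant 2*6 - 5*1 = 7 is coprime with 36, as
# check_determinant requires):
# cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
# encrypted = cipher.encrypt("HELLO")
# assert cipher.decrypt(encrypted).startswith("HELLO")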
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = int(input('''Enter the order of the encryption key: ''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(a__ ):
SCREAMING_SNAKE_CASE : List[Any] = [int(a__ ) for x in input().split()]
hill_matrix.append(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = HillCipher(numpy.array(a__ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
SCREAMING_SNAKE_CASE : List[str] = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
SCREAMING_SNAKE_CASE : Dict = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(a__ ) )
elif option == "2":
SCREAMING_SNAKE_CASE : Any = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 313
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
        # Get third result from Stable Diffusion Checkpoint v1.3
        res_3 = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
        # Get fourth result from Stable Diffusion Checkpoint v1.4
        res_4 = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res_1[0], res_2[0], res_3[0], res_4[0]] )
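# A minimal usage sketch (illustrative, not taken from this file: assumes the
# class above is published as the "stable_diffusion_comparison" community
# pipeline and that a CUDA device is available):
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="stable_diffusion_comparison",
#         torch_dtype=torch.float16,
#     )
#     pipe.to("cuda")
#     output = pipe(prompt="an astronaut riding a horse")
#     # output.images holds one image per checkpoint (v1.1 through v1.4)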
| 313
| 1
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    # (play >= oppo) + (play > oppo) maps the comparison to an index:
    # 0 -> "Loss", 1 -> "Tie", 2 -> "Win"
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight( hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_compare_simple( hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_compare_random( hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
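# To run these checks locally (sketch; assumes pytest is installed and that this
# module sits next to sol1.py and poker_hands.txt - the file names here are
# assumptions, not taken from this snippet):
#
#     python -m pytest test_poker_hand.py -q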
| 313
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput( BaseOutput ):
    """simple docstring"""
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel( nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng ) ->FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
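    # Hypothetical initialization sketch (argument values are assumptions, shown
    # only to illustrate how init_weights is typically driven):
    #
    #     import jax
    #     model = FlaxUNetaDConditionModel(sample_size=32)
    #     params = model.init_weights(jax.random.PRNGKey(0))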
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict = True , train = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
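# Hypothetical forward-pass sketch (shapes are illustrative and assume the
# default config; `params` comes from init_weights above):
#
#     sample = jnp.zeros((1, 4, 32, 32))
#     timesteps = jnp.array([10], dtype=jnp.int32)
#     encoder_hidden_states = jnp.zeros((1, 77, 1280))
#     out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)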
| 313
| 1
|
def solution( n = 100 ):
    """simple docstring"""
    collect_powers = set()
    upper_limit = n + 1  # maximum limit (inclusive of n)
    for a in range(2 , upper_limit ):
        for b in range(2 , upper_limit ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
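# Quick sanity check (9183 is the widely quoted count of distinct terms a**b
# for 2 <= a, b <= 100; treat it as an assumption if reproducing):
#
#     assert solution(100) == 9183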
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImgaImgPipelineFastTests( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({'''image_embeds''': None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe(init_image , '''anime turle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe(init_image , '''anime turle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
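    # The memory assertion above follows a reusable pattern (sketch; the 7 GB
    # budget is specific to this pipeline and hardware):
    #
    #     torch.cuda.reset_peak_memory_stats()
    #     _ = pipe(init_image, "prompt", num_inference_steps=2, output_type="np")
    #     assert torch.cuda.max_memory_allocated() < 7 * 10**9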
| 313
| 1
|
import math
def insertion_sort( array , start = 0 , end = 0 ):
    """simple docstring"""
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array , index , heap_size ):  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array ):
    """simple docstring"""
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )
    return array
def median_of_3( array , first_index , middle_index , last_index ):
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array , low , high , pivot ):
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort( array ):
    """simple docstring"""
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array , start , end , size_threshold , max_depth ):
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , start , p , size_threshold , max_depth )
        start = p
    return insertion_sort(array , start , end )
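# Quick illustrative check of the hybrid introsort entry point defined above:
#
#     assert sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) == [
#         1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]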
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint( ABC ):
"""simple docstring"""
    def __init__( self ):
        # test for the above condition
        self.test()
    def test( self ):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped, completed, reset = self.update(advance )
            counter += 1
            if counter > 1_0000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )
    @abstractmethod
    def advance( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def does_advance( self , token_id ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def update( self , token_id ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def reset( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def remaining( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def copy( self , stateful=False ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint( Constraint ):
    """simple docstring"""
    def __init__( self , token_ids ):
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance( self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update( self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining( self ):
        return self.seqlen - (self.fulfilled_idx + 1)
    def copy( self , stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
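# Minimal usage sketch for PhrasalConstraint (token ids are illustrative):
#
#     constraint = PhrasalConstraint([5, 9, 2])
#     assert constraint.advance() == 5            # next token required
#     stepped, completed, reset = constraint.update(5)
#     assert stepped and not completed            # 9 and 2 still pending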
class DisjunctiveTrie:
    """simple docstring"""
    def __init__( self , nested_token_ids , no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F""" {nested_token_ids}.""" )
        self.trie = root
    def next_tokens( self , current_seq ):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self , current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self , trie , nested_token_ids ):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint( Constraint ):
    """simple docstring"""
    def __init__( self , nested_token_ids ):
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def does_advance( self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def update( self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.current_seq = []
    def remaining( self ):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def copy( self , stateful=False ):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
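# DisjunctiveConstraint fulfills whichever of several sequences the beam
# produces (sketch; ids are illustrative):
#
#     constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#     assert constraint.advance() == [1]
#     constraint.update(1)
#     assert sorted(constraint.advance()) == [2, 4]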
class ConstraintListState:
    """simple docstring"""
    def __init__( self , constraints ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance( self ):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self , token_ids ):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped, complete, reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ):
        new_state = ConstraintListState(self.constraints )  # we never actually mutate the self.constraints
        # objects throughout this process, so they are still in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
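# Sketch of how a generation loop consumes ConstraintListState (illustrative):
#
#     state = ConstraintListState([PhrasalConstraint([5, 9])])
#     assert state.advance() == [5]   # tokens that make progress right now
#     state.add(5)
#     assert state.advance() == [9]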
| 313
| 1
|
import math
def proth( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        error_msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(error_msg )
    if number < 1:
        error_msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(error_msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # index of the last "block" of powers needed to reach the requested number
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
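# For reference, the sequence starts 3, 5, 9, 13, 17, 25, 33, 41, 49, ...
# so e.g. proth(6) == 25.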
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
| 313
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
    """simple docstring"""
    set_seed(3 )
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1_026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    model = load_gpta('''gpt2''' ).to(device )
    print('''computing perplexity on objective set''' )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print('''perplexity on objective set:''' , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
    """simple docstring"""
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    """simple docstring"""
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataloader )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain (X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# Fine-tuning of the gpt2 model using IGF (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 313
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 313
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda p : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
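# Note: only parameters with requires_grad=True are counted above, so any frozen
# weights (e.g. a frozen embedding table) do not contribute to the returned total.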
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"""
''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
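# Illustration (hypothetical values): with metric="rouge2" the checkpoint files are
# named like "27.8130-500", i.e. the averaged validation ROUGE-2 to four decimals
# followed by the global step count.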
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 313
| 1
|
a__ : List[Any] = {str(digit): digit**5 for digit in range(10)}
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a__ ) )
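# Worked example: 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, so the
# helper above maps 4150 back to itself and 4150 is counted by the solution below.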
def UpperCAmelCase_( ):
"""simple docstring"""
return sum(
number
for number in range(1_000 , 1_000_000 )
if number == digits_fifth_powers_sum(a__ ) )
if __name__ == "__main__":
print(solution())
| 313
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF) # CJK Unified Ideographs
or (cp >= 0x3_400 and cp <= 0x4_DBF) # CJK Unified Ideographs Extension A
or (cp >= 0x20_000 and cp <= 0x2A_6DF) # CJK Unified Ideographs Extension B
or (cp >= 0x2A_700 and cp <= 0x2B_73F) # CJK Unified Ideographs Extension C
or (cp >= 0x2B_740 and cp <= 0x2B_81F) # CJK Unified Ideographs Extension D
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) # CJK Unified Ideographs Extension E
or (cp >= 0xF_900 and cp <= 0xF_AFF) # CJK Compatibility Ideographs
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) # CJK Compatibility Ideographs Supplement
): # these ranges cover the CJK (Chinese) characters
return True
return False
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE : str = ord(a__ )
if not _is_chinese_char(a__ ):
return 0
return 1
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ )
if chinese_word:
word_set.add(a__ )
SCREAMING_SNAKE_CASE : str = list(a__ )
return word_list
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE : Tuple = bert_tokens
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ )
while start < end:
SCREAMING_SNAKE_CASE : Dict = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ )
for i in range(a__ , 1 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j]
SCREAMING_SNAKE_CASE : List[str] = start + i
SCREAMING_SNAKE_CASE : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
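# Illustrative sketch (hypothetical tokens): with bert_word = ["中", "国", "人"] and
# chinese_word_set = {"中国"}, the longest-match scan above rewrites the list to
# ["中", "##国", "人"], marking "国" as the continuation of the whole word "中国".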
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
# We only save the positions of chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(a__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
# save chinese tokens' pos
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster on a GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file to process, same as the training data in the lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save the result''')
a__ : int = parser.parse_args()
main(args)
| 313
| 1
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = []
for part_id in partition_order:
SCREAMING_SNAKE_CASE : Union[str, Any] = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(a__ ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Tuple = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE : Optional[int] = Spark(a__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : List[Any] = spark.range(10 ).repartition(2 )
SCREAMING_SNAKE_CASE : Dict = [1, 0]
SCREAMING_SNAKE_CASE : Tuple = _generate_iterable_examples(a__ , a__ ) # Reverse the partitions.
SCREAMING_SNAKE_CASE : str = _get_expected_row_ids_and_row_dicts_for_partition_order(a__ , a__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : List[str] = spark.range(10 ).repartition(1 )
SCREAMING_SNAKE_CASE : int = SparkExamplesIterable(a__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(a__ ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : List[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
SCREAMING_SNAKE_CASE : List[str] = lambda a__ : x.reverse()
SCREAMING_SNAKE_CASE : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(a__ , [2, 1, 0] )
SCREAMING_SNAKE_CASE : Tuple = SparkExamplesIterable(a__ ).shuffle_data_sources(a__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(a__ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
SCREAMING_SNAKE_CASE : Union[str, Any] = SparkExamplesIterable(a__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(a__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(a__ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
SCREAMING_SNAKE_CASE : List[Any] = SparkExamplesIterable(a__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(a__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(a__ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Any = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE : int = Spark(a__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 313
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Tuple = '''1'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
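# Usage sketch (hedged): this helper mirrors `ffmpeg_read` from transformers and
# assumes a working ffmpeg binary on PATH; "sample.flac" below is a hypothetical path.
# with open("sample.flac", "rb") as f:
# waveform = ffmpeg_read(f.read(), 16_000) # mono float32 waveform at 16 kHz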
def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Dict = '''1'''
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = '''alsa'''
SCREAMING_SNAKE_CASE : Any = '''default'''
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation'''
SCREAMING_SNAKE_CASE : Optional[int] = ''':0'''
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = '''dshow'''
SCREAMING_SNAKE_CASE : Any = '''default'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Any = np.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = b''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
SCREAMING_SNAKE_CASE : List[str] = False
yield item
SCREAMING_SNAKE_CASE : Dict = stride_left
SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
yield item
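# Worked example (non-stream): with chunk_len=6, stride=(2, 2) and a single input
# chunk b"0123456789", the loop above yields raw chunks b"012345" (stride (0, 2)),
# b"234567" (stride (2, 2)) and b"456789" (stride (2, 2)), and the final flush
# yields b"6789" (stride (2, 0)); dropping each chunk's stride bytes reconstructs
# the original stream exactly once.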
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 2**24 # 16MB
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 313
| 1
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ )
return k
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE : Dict = v.T
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
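# Note on the transpose above: TF stores dense/projection kernels with shape
# (in_features, out_features) while torch.nn.Linear expects (out_features,
# in_features), hence v.T for "dense"/"proj" weights.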
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
SCREAMING_SNAKE_CASE : int = task_specific_params
SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 313
| 1
|
def UpperCAmelCase_( a__ = 50 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
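# Sanity check: for length=7 this returns 17, matching the worked example in
# Project Euler problem 114 (a row of seven units can be filled in exactly 17 ways).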
if __name__ == "__main__":
print(F"{solution() = }")
| 313
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr'
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : str = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : int = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[str] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : str = num_feature_levels
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points
SCREAMING_SNAKE_CASE : Any = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : int = class_cost
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def __lowerCAmelCase ( self ) ->int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) ->int:
return self.d_model
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
| 313
| 1
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = VQModel
__SCREAMING_SNAKE_CASE : Any = 'sample'
@property
def __lowerCAmelCase ( self , _lowerCamelCase=(32, 32) ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : Optional[int] = 3
SCREAMING_SNAKE_CASE : str = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
return {"sample": image}
@property
def __lowerCAmelCase ( self ) ->List[str]:
return (3, 32, 32)
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return (3, 32, 32)
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Tuple = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(_lowerCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
SCREAMING_SNAKE_CASE : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
SCREAMING_SNAKE_CASE : Optional[Any] = image.to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase ).sample
SCREAMING_SNAKE_CASE : Any = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE : Any = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
| 313
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = 10
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCamelCase )
return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
| 313
| 1
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase_( a__ , a__ , a__ = "x" , a__ = 10**-10 , a__ = 1 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = symbols(a__ )
SCREAMING_SNAKE_CASE : int = lambdify(a__ , a__ )
SCREAMING_SNAKE_CASE : str = lambdify(a__ , diff(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
SCREAMING_SNAKE_CASE : List[str] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE : List[str] = next_guess
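# The loop above implements the modified Newton-Raphson update
# x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n),
# which restores quadratic convergence for roots of known multiplicity.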
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}",
)
# Find root of cos(x)
print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 313
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
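# Like RoBERTa, LED does not make use of token type ids, so the returned mask is
# all zeros for single sequences and sequence pairs alike.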
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 313
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
a__ : Dict = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def UpperCAmelCase_( a__ , a__ , a__ , a__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = XLNetConfig.from_json_file(a__ )
SCREAMING_SNAKE_CASE : List[str] = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
SCREAMING_SNAKE_CASE : List[Any] = finetuning_task
SCREAMING_SNAKE_CASE : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
SCREAMING_SNAKE_CASE : Tuple = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
SCREAMING_SNAKE_CASE : Optional[Any] = finetuning_task
SCREAMING_SNAKE_CASE : Tuple = XLNetForQuestionAnswering(a__ )
else:
SCREAMING_SNAKE_CASE : int = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(a__ , a__ )
SCREAMING_SNAKE_CASE : str = os.path.join(a__ , a__ )
print(F"""Save PyTorch model to {os.path.abspath(a__ )}""" )
torch.save(model.state_dict() , a__ )
print(F"""Save configuration file to {os.path.abspath(a__ )}""" )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
a__ : str = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 313
|
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple:
    """Split an even-dimensioned square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for power-of-two square matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
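
# Optional sanity check (illustrative addition, not part of the original script): verify the
# 2x2 base case against a naive O(n^3) multiplication. `actual_strassen` is called directly
# because `strassen` short-circuits when both inputs are square.
def naive_multiply(a, b):
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]


assert actual_strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == naive_multiply(
    [[1, 2], [3, 4]], [[5, 6], [7, 8]]
)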
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens, used by the spectrogram diffusion pipeline."""

    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for _ in range(num_layers):
            self.encoders.append(T5Block(t5config))
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask (1 = attend, 0 = masked) into additive form
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
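
# Shape sketch (illustrative; these hyperparameters are made up, not from a real checkpoint):
# enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#                               num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#                               feed_forward_proj="gated-gelu")
# tokens = torch.zeros(1, 2048, dtype=torch.long); mask = torch.ones(1, 2048, dtype=torch.long)
# hidden, out_mask = enc(tokens, mask)  # hidden: (1, 2048, 768); the mask is returned unchanged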
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    Wraps a learning-rate scheduler so it only steps when the wrapped optimizers actually
    performed a step (for example, the step is skipped on gradient overflow in mixed precision).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
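
# Illustrative usage sketch (names are hypothetical; assumes `optimizer` was prepared by
# Accelerate so it exposes `step_was_skipped`):
# scheduler = AcceleratedScheduler(
#     torch.optim.lr_scheduler.StepLR(optimizer, step_size=10), optimizer
# )
# scheduler.step()  # advances the LR schedule only when the optimizer truly stepped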
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image, return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    """Map a Pegasus TF variable name onto the corresponding PyTorch state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights, cfg_updates):
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000"):
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
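# Example invocation (hypothetical checkpoint path, for illustration only):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
# If args.save_dir is None, the output defaults to pegasus/<dataset>, where <dataset> is the
# checkpoint's parent directory name (here: "aeslc").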
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end time and duration (in minutes) of a single GitHub Actions job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
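
# Quick check of the duration arithmetic (hypothetical payload, for illustration):
# extract_time_from_single_job({"started_at": "2023-01-01T00:00:00Z", "completed_at": "2023-01-01T00:42:30Z"})
# -> duration == 42  (42.5 minutes rounds to 42 under Python's round-half-to-even)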
def get_job_time(workflow_run_id, token=None):
    """Extract timing info for all jobs in a specific workflow run, following the paginated API."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"{k}: {v['duration']}")
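# Example of the sorted output (job names and durations are made up, for illustration only):
#   run_tests_torch: 42
#   check_code_quality: 7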
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4,
            num_hidden_layers=5, vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb,
            generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
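
# Sketch (illustrative, simplified re-implementation; not used by this module): the lazy-module
# pattern above defers the heavy torch-dependent import until an attribute is first accessed.
# class _TinyLazyModule(types.ModuleType):
#     def __init__(self, name, attr_to_module):
#         super().__init__(name)
#         self._attr_to_module = attr_to_module  # e.g. {"UperNetConfig": "configuration_upernet"}
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         return getattr(module, attr)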
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training; not needed downstream
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
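# Example invocation (hypothetical output path, for illustration only):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small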
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a full diamond of height 2 * n; complain politely for n <= 0."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
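
# Expected output for pretty_print(3) (illustrative; trailing spaces omitted):
#   *
#  * *
# * * *
# * * *
#  * *
#   *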
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
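# Pagination note (conceptual): passing max_id=oldest ensures each request returns only tweets
# strictly older than those already collected, so the loop terminates once the timeline (or the
# API's ~3200-tweet history limit) is exhausted.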
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_attention_outputs = False
    test_pruning = False
    test_resize_embeddings = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
    def test_attention_outputs(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->str:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Tuple = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
SCREAMING_SNAKE_CASE : Optional[int] = model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
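# The mapping above tells ONNX export which input axes are dynamic: axis 0 is
# the batch dimension and the last axis is the sequence length, with an extra
# choice axis for multiple-choice tasks.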
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a__ : int = logging.get_logger(__name__)
a__ : List[str] = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
a__ : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE : Optional[int] = model_type_to_module_name(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(a__ , a__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(a__ , '''__name__''' , a__ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE : Optional[int] = importlib.import_module('''transformers''' )
if hasattr(a__ , a__ ):
return getattr(a__ , a__ )
return None
def UpperCAmelCase_( a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = get_file_from_repo(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(a__ , encoding='''utf-8''' ) as reader:
return json.load(a__ )
class a_ :
"""simple docstring"""
def __init__( self ) ->Union[str, Any]:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_lowerCamelCase )
def __lowerCAmelCase ( cls , _lowerCamelCase , **_lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''config''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''trust_remote_code''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = ImageProcessingMixin.get_image_processor_dict(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = config_dict.get('''image_processor_type''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE : str = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
SCREAMING_SNAKE_CASE : Any = config_dict.pop('''feature_extractor_type''' , _lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
SCREAMING_SNAKE_CASE : List[str] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
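# e.g. "CLIPFeatureExtractor" becomes "CLIPImageProcessor"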
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE : Tuple = config_dict['''auto_map''']['''AutoFeatureExtractor''']
SCREAMING_SNAKE_CASE : List[str] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
# It could be in `config.image_processor_type``
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , '''image_processor_type''' , _lowerCamelCase )
if hasattr(_lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
SCREAMING_SNAKE_CASE : Tuple = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
SCREAMING_SNAKE_CASE : Optional[int] = image_processor_class_from_name(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = image_processor_auto_map is not None
SCREAMING_SNAKE_CASE : List[Any] = image_processor_class is not None or type(_lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
SCREAMING_SNAKE_CASE : str = resolve_trust_remote_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE : List[Any] = get_class_from_dynamic_module(
_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''code_revision''' , _lowerCamelCase )
if os.path.isdir(_lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
SCREAMING_SNAKE_CASE : Any = IMAGE_PROCESSOR_MAPPING[type(_lowerCamelCase )]
return image_processor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowerCAmelCase ( _lowerCamelCase , _lowerCamelCase ) ->Tuple:
IMAGE_PROCESSOR_MAPPING.register(_lowerCamelCase , _lowerCamelCase )
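# Illustrative usage (MyConfig / MyImageProcessor are hypothetical names): once
# a custom pair is registered, AutoImageProcessor resolves it like a built-in:
#     AutoImageProcessor.register(MyConfig, MyImageProcessor)
#     processor = AutoImageProcessor.from_pretrained("my-org/my-model")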
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
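# Note: the defaults mirror ImageGPT's design -- a vocabulary of 512 color
# clusters plus one start-of-sequence token, and 32 * 32 = 1024 pixel positions.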
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Dict = embeddings_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : str = depths
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Any = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->List[str]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] = TFResNetModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = TFResNetForImageClassification(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Dict = False
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = TFResNetModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->str:
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[int] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE : List[str] = layer_type
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->List[Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = TFResNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->Optional[int]:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE : str = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
from maths.prime_check import is_prime
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , int ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={a__}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if is_prime(a__ ) and is_prime(a__ + 2 ):
return a__ + 2
else:
return -1
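# Example: UpperCAmelCase_(5) returns 7 (5 and 7 are twin primes), while
# UpperCAmelCase_(8) returns -1 because 8 is not prime.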
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : int = {int(a__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Tuple = BitConfig(
conv_layer=a__ , num_labels=1_000 , idalabel=a__ , labelaid=a__ , )
return config
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : int = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : int = '''bit.encoder.''' + name
return name
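# e.g. "stem.conv.weight" becomes "bit.embedder.convolution.weight" and
# "head.fc.weight" becomes "classifier.1.weight".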
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_( a__ , a__ , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = get_config(a__ )
# load original model from timm
SCREAMING_SNAKE_CASE : List[Any] = create_model(a__ , pretrained=a__ )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Dict = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(a__ )
SCREAMING_SNAKE_CASE : List[str] = val.squeeze() if '''head''' in key else val
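# timm implements the classification head as a 1x1 convolution, so its weights
# carry trailing spatial dims; the squeeze above drops them before they are
# loaded into the Hugging Face linear classifier.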
# load HuggingFace model
SCREAMING_SNAKE_CASE : Optional[Any] = BitForImageClassification(a__ )
model.eval()
model.load_state_dict(a__ )
# create image processor
SCREAMING_SNAKE_CASE : Any = create_transform(**resolve_data_config({} , model=a__ ) )
SCREAMING_SNAKE_CASE : int = transform.transforms
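# Map timm's interpolation names onto the PIL resampling enums expected by the
# Hugging Face image processor.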
SCREAMING_SNAKE_CASE : Optional[int] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=a__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = transform(a__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(a__ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(a__ , a__ )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(a__ )
SCREAMING_SNAKE_CASE : int = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(a__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a__ , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(a__ ).mkdir(exist_ok=a__ )
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
processor.save_pretrained(a__ )
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(F"""ybelkada/{model_name}""" )
processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
a__ : Any = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
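# A short DDIM schedule for the dummy pipeline: prediction_type "epsilon" means
# the UNet predicts the added noise rather than the denoised sample.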
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
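# The depth hint is scaled to [0, 1] and reshaped from (H, W, 3) to the
# (1, 3, H, W) layout expected by the controlnet pipeline.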
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="resnet50" , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Tuple = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE : Dict = stage_names
SCREAMING_SNAKE_CASE : Optional[Any] = out_features
SCREAMING_SNAKE_CASE : Optional[Any] = backbone
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : int = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Any = is_training
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = TimmBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(_lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (TimmBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : str = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Tuple = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = '''resnet18'''
SCREAMING_SNAKE_CASE : Dict = '''microsoft/resnet-18'''
SCREAMING_SNAKE_CASE : int = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = AutoBackbone.from_pretrained(_lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE : Dict = AutoBackbone.from_pretrained(_lowerCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE : Optional[int] = self.all_model_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE : Tuple = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE : List[str] = outputs.attentions[0]
attentions.retain_grad()
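# retain_grad() keeps gradients on these non-leaf tensors so the assertions
# below can verify that backpropagation reaches the intermediate activations.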
output.flatten()[0].backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase )
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
        return self.pipe4(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
        # Get second result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
        # Get third result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
        # Get fourth result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
        # Gather the four result images into a single list and return them via StableDiffusionPipelineOutput
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
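

# Hedged usage sketch, not part of the original pipeline file: community pipelines
# such as this one are usually loaded through `DiffusionPipeline.from_pretrained`
# with the `custom_pipeline` argument. The checkpoint id, pipeline name and prompt
# below are illustrative assumptions.
def _demo_comparison_pipeline():
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",  # assumed base checkpoint
        custom_pipeline="stable_diffusion_comparison",  # assumed community pipeline id
        torch_dtype=torch.float16,
    )
    pipe.enable_attention_slicing()  # uses the "auto" slicing implemented above
    # Calls the v1.4 text-to-image entry point defined above; the aggregated method
    # would run all four checkpoints and return one image per checkpoint.
    return pipe.text2img_sd1_4("an astronaut riding a horse", num_inference_steps=25)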
| 313
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class a_ :
"""simple docstring"""
    def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=64 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Any = embedding_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Dict = num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) ->Optional[int]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = MegatronBertModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = MegatronBertForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = MegatronBertForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = MegatronBertForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Any = MegatronBertForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = MegatronBertForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : str = MegatronBertForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Any = MegatronBertForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : str = MegatronBertForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Tuple = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Any = True
# test_resize_embeddings = False
__SCREAMING_SNAKE_CASE : Dict = False
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class in get_values(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : int = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCamelCase )
def UpperCAmelCase_( a__ ):
    """simple docstring"""
    return torch.tensor(
        a__ , dtype=torch.long , device=torch_device , )
a__ : Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = MegatronBertModel.from_pretrained(_lowerCamelCase )
model.to(_lowerCamelCase )
model.half()
        SCREAMING_SNAKE_CASE : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , _lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[str] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE : int = output[0, ii, jj]
SCREAMING_SNAKE_CASE : Tuple = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE : Any = '''ii={} jj={} a={} b={}'''.format(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(math.isclose(_lowerCamelCase , _lowerCamelCase , rel_tol=_lowerCamelCase , abs_tol=_lowerCamelCase ) , msg=_lowerCamelCase )
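

# Hedged equivalent of the elementwise tolerance loop above (illustrative only;
# tensor contents are dummy values, not real model outputs):
def _demo_allclose():
    a = torch.tensor([-0.6040, -0.2517, -0.1025])
    b = torch.tensor([-0.6041, -0.2516, -0.1025])
    # mirrors math.isclose(..., rel_tol=TOLERANCE, abs_tol=TOLERANCE) over all entries
    return torch.allclose(a, b, rtol=1e-4, atol=1e-4)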
| 313
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
    __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.float32
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
        SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.float32 )
        SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.int32 )
        SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
                SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlock2D(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
                SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlock2D(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
        SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlock2DCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
                SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlock2D(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
                SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlock2D(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNet2DConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
            SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
        return FlaxUNet2DConditionOutput(sample=_lowerCamelCase )
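

# Hedged usage sketch (the public class name FlaxUNet2DConditionModel and the tiny
# config below are assumptions for illustration; `init_weights` corresponds to the
# rng-based initializer defined above):
def _demo_flax_unet():
    import jax
    import jax.numpy as jnp
    from diffusers import FlaxUNet2DConditionModel

    unet = FlaxUNet2DConditionModel(
        sample_size=32,
        block_out_channels=(32, 64),
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
        cross_attention_dim=32,
        attention_head_dim=4,
        layers_per_block=1,
    )
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)  # (batch, channels, h, w)
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 1, 32), dtype=jnp.float32)
    return unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)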
| 313
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return (data["data"], data["target"])
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = XGBClassifier()
classifier.fit(a__ , a__ )
return classifier
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = load_iris()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = data_handling(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = train_test_split(
a__ , a__ , test_size=0.25 )
SCREAMING_SNAKE_CASE : int = iris['''target_names''']
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE : Dict = xgboost(a__ , a__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
a__ , a__ , a__ , display_labels=a__ , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
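# Hedged follow-up sketch (not part of the original script): once `xgboost()` has
# returned a fitted classifier, XGBClassifier exposes the usual scikit-learn
# prediction API.
def _demo_predict(classifier: XGBClassifier, features: np.ndarray) -> np.ndarray:
    # hard class labels; `classifier.predict_proba(features)` would give probabilities
    return classifier.predict(features)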
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
    __SCREAMING_SNAKE_CASE : int = StableUnCLIPImg2ImgPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Tuple = frozenset([] )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Any = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL()
SCREAMING_SNAKE_CASE : Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if pil_image:
SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImg2ImgPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase )
inputs.update({'''image_embeds''': None} )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        SCREAMING_SNAKE_CASE : Tuple = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.float16 )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16 )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        SCREAMING_SNAKE_CASE : str = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Dict = pipe(
_lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
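

# Hedged sketch of the memory-saving pattern the slow tests above rely on (the
# checkpoint id is taken from those tests; everything else is illustrative):
def _demo_low_memory_pipeline():
    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing()       # chunk attention to reduce peak VRAM
    pipe.enable_sequential_cpu_offload()  # keep weights on CPU, move per-module to GPU
    return pipe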
| 313
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a__ : Dict = logging.get_logger(__name__)
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = ['input_features', 'is_longer']
    def __init__( self , _lowerCamelCase=64 , _lowerCamelCase=48000 , _lowerCamelCase=480 , _lowerCamelCase=10 , _lowerCamelCase=1024 , _lowerCamelCase=0.0 , _lowerCamelCase=False , _lowerCamelCase = 0 , _lowerCamelCase = 14000 , _lowerCamelCase = None , _lowerCamelCase = "fusion" , _lowerCamelCase = "repeatpad" , **_lowerCamelCase , ) ->int:
super().__init__(
feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[Any] = top_db
SCREAMING_SNAKE_CASE : Dict = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : Any = fft_window_size
SCREAMING_SNAKE_CASE : str = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : str = hop_length
SCREAMING_SNAKE_CASE : int = max_length_s
SCREAMING_SNAKE_CASE : str = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : Optional[Any] = frequency_min
SCREAMING_SNAKE_CASE : int = frequency_max
SCREAMING_SNAKE_CASE : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCamelCase , min_frequency=_lowerCamelCase , max_frequency=_lowerCamelCase , sampling_rate=_lowerCamelCase , norm=_lowerCamelCase , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCamelCase , min_frequency=_lowerCamelCase , max_frequency=_lowerCamelCase , sampling_rate=_lowerCamelCase , norm='''slaney''' , mel_scale='''slaney''' , )
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->np.ndarray:
SCREAMING_SNAKE_CASE : Dict = spectrogram(
_lowerCamelCase , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowerCamelCase , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Dict = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Tuple = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : Dict = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : List[Any] = torch.nn.functional.interpolate(
_lowerCamelCase , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
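        # Worked shape example (illustrative): with the defaults hop_length=480 and a
        # 10 s max length at 48 kHz, chunk_frames is 480000 // 480 + 1 = 1001, so the
        # fused feature stacked here is (4, 1001, 64): one globally shrunk mel plus
        # random crops from the front, middle and back thirds of the spectrogram.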
return mel_fusion
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Union[str, Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(_lowerCamelCase ) - max_length
SCREAMING_SNAKE_CASE : Any = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : int = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Optional[Any] = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Optional[int] = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE : Any = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : int = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Any = False
else:
SCREAMING_SNAKE_CASE : int = self._random_mel_fusion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : str = int(max_length / len(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Tuple = np.stack(np.tile(_lowerCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = np.pad(_lowerCamelCase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ) ->BatchFeature:
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : Tuple = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : Any = isinstance(_lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : Optional[Any] = is_batched_numpy or (
isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            SCREAMING_SNAKE_CASE : List[str] = [np.asarray(_lowerCamelCase , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
            SCREAMING_SNAKE_CASE : Dict = np.asarray(_lowerCamelCase , dtype=np.float64 )
        elif isinstance(_lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.float64 )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(_lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : Union[str, Any] = [
self._get_input_mel(_lowerCamelCase , max_length if max_length else self.nb_max_samples , _lowerCamelCase , _lowerCamelCase )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : List[Any] = []
for mel, longer in padded_inputs:
input_mel.append(_lowerCamelCase )
is_longer.append(_lowerCamelCase )
if truncation == "fusion" and sum(_lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : str = np.random.randint(0 , len(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = True
if isinstance(input_mel[0] , _lowerCamelCase ):
            SCREAMING_SNAKE_CASE : List[str] = [np.asarray(_lowerCamelCase , dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : int = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature(_lowerCamelCase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = input_features.convert_to_tensors(_lowerCamelCase )
return input_features
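

# Hedged usage sketch (the checkpoint id is an assumption): the extractor above is
# normally instantiated via `from_pretrained` and called on raw mono waveforms.
def _demo_clap_features():
    from transformers import ClapFeatureExtractor

    extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
    waveform = np.zeros(48000, dtype=np.float64)  # one second of silence at 48 kHz
    features = extractor(waveform, sampling_rate=48000, return_tensors="pt")
    return features["input_features"].shape, features["is_longer"]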
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
            if counter > 10000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
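

# Hedged usage sketch (model id and texts are illustrative): PhrasalConstraint is
# designed to be handed to `generate` together with beam search, which forces the
# given token sequence to appear somewhere in the generated output.
def _demo_phrasal_constraint():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    forced_ids = tokenizer("sehr", add_special_tokens=False).input_ids
    inputs = tokenizer("translate English to German: How are you?", return_tensors="pt")
    # constrained beam search requires num_beams > 1
    return model.generate(**inputs, constraints=[PhrasalConstraint(forced_ids)], num_beams=4)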
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
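# A tiny walk-through of the trie above, assuming the de-obfuscated upstream
# names for the class `a_` and its methods: `DisjunctiveTrie`, `next_tokens`,
# and `reached_leaf`. `next_tokens` enumerates the continuations of a prefix,
# and `reached_leaf` reports when a full branch has been consumed.
trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [1, 5]])
print(trie.next_tokens([1]))      # [2, 5]
print(trie.next_tokens([1, 2]))   # [3, 4]
print(trie.reached_leaf([1, 5]))  # True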
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
                # the entire list of constraints is fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
        SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects
        # themselves throughout this process, so they stay at their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
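# A minimal usage sketch of constrained generation built on these classes,
# assuming the public `transformers` API (`PhrasalConstraint` together with
# `generate(constraints=...)`, which requires beam search); the model and the
# forced phrase are illustrative choices only.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# Token ids of a phrase that must appear in every finished beam.
forced_ids = tokenizer("Hallo", add_special_tokens=False).input_ids
inputs = tokenizer("translate English to German: hello there", return_tensors="pt")
output = model.generate(**inputs, constraints=[PhrasalConstraint(forced_ids)], num_beams=4)
print(tokenizer.decode(output[0], skip_special_tokens=True))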
| 313
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(a__ )] )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , a__ ) ) , x.transpose() ) , a__ )
    # beta = (X^T X)^{-1} X^T y, so beta[2] scales the match feature
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = (1, 2, 1)
SCREAMING_SNAKE_CASE : int = (1, 1, 0, 7)
SCREAMING_SNAKE_CASE : Dict = SARIMAX(
a__ , exog=a__ , order=a__ , seasonal_order=a__ )
SCREAMING_SNAKE_CASE : str = model.fit(disp=a__ , maxiter=600 , method='''nm''' )
SCREAMING_SNAKE_CASE : Tuple = model_fit.predict(1 , len(a__ ) , exog=[test_match] )
return result[0]
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[int] = regressor.predict(a__ )
return y_pred[0]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
train_user.sort()
    SCREAMING_SNAKE_CASE : Any = np.percentile(a__ , 25 )  # first quartile (q1)
    SCREAMING_SNAKE_CASE : List[str] = np.percentile(a__ , 75 )  # third quartile (q3)
    SCREAMING_SNAKE_CASE : Dict = qa - qa  # interquartile range: iqr = q3 - q1
    SCREAMING_SNAKE_CASE : Union[str, Any] = qa - (iqr * 0.1)  # lower outlier bound: q1 - 0.1 * iqr
return low_lim
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
for i in list_vote:
if i > actual_result:
SCREAMING_SNAKE_CASE : List[str] = not_safe + 1
else:
if abs(abs(a__ ) - abs(a__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
a__ : Optional[Any] = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
a__ : Optional[int] = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
a__ : str = Normalizer().fit_transform(data_input_df.values)
# split data
a__ : int = normalize_df[:, 2].tolist()
a__ : int = normalize_df[:, 0].tolist()
a__ : Optional[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
a__ : Union[str, Any] = normalize_df[:, [1, 2]].tolist()
a__ : Optional[int] = x[: len(x) - 1]
a__ : List[str] = x[len(x) - 1 :]
# for linear regression & sarimax
a__ : str = total_date[: len(total_date) - 1]
a__ : Optional[int] = total_user[: len(total_user) - 1]
a__ : Optional[Any] = total_match[: len(total_match) - 1]
a__ : int = total_date[len(total_date) - 1 :]
a__ : List[str] = total_user[len(total_user) - 1 :]
a__ : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
a__ : Union[str, Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
a__ : Union[str, Any] = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
    print(F"""Today's data is {not_str}safe.""")
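# A self-contained sketch of the normal-equation fit used by the first
# forecasting helper above (upstream name `linear_regression_prediction`),
# on tiny illustrative data: beta = (X^T X)^{-1} X^T y, then one prediction.
import numpy as np
X = np.array([[1.0, 1, 3], [1.0, 2, 1], [1.0, 3, 2], [1.0, 4, 4]])  # bias, date, match
y = np.array([5.0, 3.0, 4.0, 6.0])  # total users
beta = np.linalg.inv(X.T @ X) @ X.T @ y
print(abs(beta[0] + 5 * beta[1] + 2 * beta[2]))  # predict for date=5, match=2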
| 313
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets(
a__ , a__ , number=a__ , min_len=1_026 , trim=a__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ )
print('''computing perplexity on objective set''' )
SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item()
print('''perplexity on objective set:''' , a__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ )
# Train secondary learner
SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner(
a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ )
SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ )
SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a__ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
for epoch in range(int(a__ ) ):
for step, example in enumerate(a__ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward(
torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE : str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
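# A minimal sketch of the information-gain filtering rule implemented in the
# training loop above, with a hypothetical `predicted_ig` score standing in
# for the secondary learner's output: a context is backpropagated through only
# if its predicted IG clears a threshold, and that threshold is dropped after
# a warm-up number of steps (mirroring the `global_step == 10` switch above).
def should_backprop(predicted_ig: float, global_step: int, threshold: float = 1.0, warmup_steps: int = 10) -> bool:
    effective_threshold = threshold if global_step < warmup_steps else -1.0
    return predicted_ig >= effective_threshold
print(should_backprop(0.5, global_step=3))   # False: filtered out early in training
print(should_backprop(0.5, global_step=20))  # True: threshold relaxed later on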
| 313
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=19 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Dict = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : Tuple = use_labels
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : str = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_lowerCamelCase , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = EsmForProteinFolding(config=_lowerCamelCase ).float()
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = (EsmForProteinFolding,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[int] = ()
__SCREAMING_SNAKE_CASE : str = {} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = EsmFoldModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
@unittest.skip('''Does not support attention outputs''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@require_torch
class a_ ( a__ ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : int = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase )['''positions''']
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _lowerCamelCase , atol=1e-4 ) )
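# A hedged end-to-end sketch of running the same checkpoint outside the test
# suite, assuming the public ESMFold API in `transformers`; the sequence is an
# arbitrary example and the checkpoint download is large.
from transformers import AutoTokenizer, EsmForProteinFolding
tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
model.eval()
inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
outputs = model(**inputs)
print(outputs.positions.shape)  # (num_recycles, batch, seq_len, 14, 3) atom positions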
| 313
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda a__ : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
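# A minimal wiring sketch for the helpers above, assuming their de-obfuscated
# upstream names (`get_checkpoint_callback`, `get_early_stopping_callback` and
# the logging callback class, upstream `Seq2SeqLoggingCallback`), plus a
# user-provided LightningModule `module`.
import pytorch_lightning as pl
checkpoint_cb = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(max_epochs=3, callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()])
# trainer.fit(module)  # `module` is the user's LightningModule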
| 313
| 1
|
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = int(a__ )
assert noofclusters < len(a__ )
# Find out the dimensionality
SCREAMING_SNAKE_CASE : Dict = len(vectors[0] )
# Will help select random centroids from among the available vectors
SCREAMING_SNAKE_CASE : Any = list(range(len(a__ ) ) )
shuffle(a__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
SCREAMING_SNAKE_CASE : Optional[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
SCREAMING_SNAKE_CASE : Union[str, Any] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(a__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
SCREAMING_SNAKE_CASE : str = tf.placeholder('''float64''' , [dim] )
SCREAMING_SNAKE_CASE : Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(a__ , a__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
SCREAMING_SNAKE_CASE : int = [tf.Variable(0 ) for i in range(len(a__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
SCREAMING_SNAKE_CASE : str = tf.placeholder('''int32''' )
SCREAMING_SNAKE_CASE : int = []
for assignment in assignments:
cluster_assigns.append(tf.assign(a__ , a__ ) )
        ##Now let's construct the node that will compute the mean
# The placeholder for the input
SCREAMING_SNAKE_CASE : Optional[int] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
SCREAMING_SNAKE_CASE : int = tf.reduce_mean(a__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.placeholder('''float''' , [dim] )
SCREAMING_SNAKE_CASE : str = tf.placeholder('''float''' , [dim] )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(a__ , a__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
SCREAMING_SNAKE_CASE : int = tf.placeholder('''float''' , [noofclusters] )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.argmin(a__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        SCREAMING_SNAKE_CASE : int = tf.global_variables_initializer()
# Initialize all variables
sess.run(a__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
SCREAMING_SNAKE_CASE : Tuple = 100
for _ in range(a__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(a__ ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
SCREAMING_SNAKE_CASE : Optional[int] = [
sess.run(a__ , feed_dict={va: vect, va: sess.run(a__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
SCREAMING_SNAKE_CASE : Optional[int] = sess.run(
a__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(a__ ):
# Collect all the vectors assigned to this cluster
SCREAMING_SNAKE_CASE : str = [
vectors[i]
for i in range(len(a__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
SCREAMING_SNAKE_CASE : List[Any] = sess.run(
a__ , feed_dict={mean_input: array(a__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
SCREAMING_SNAKE_CASE : Optional[Any] = sess.run(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = sess.run(a__ )
return centroids, assignments
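# A compact NumPy sketch of the same E-M iteration on small 2-D toy data; it
# avoids the deprecated TF1 graph/session machinery used above and assumes
# three well-separated clusters so no cluster ends up empty.
import numpy as np
rng = np.random.default_rng(0)
points = rng.normal(size=(60, 2)) + np.repeat([[0, 0], [5, 5], [0, 5]], 20, axis=0)
centroids = points[rng.choice(len(points), size=3, replace=False)]
for _ in range(100):
    # Expectation: assign each point to its nearest centroid.
    dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=-1)
    labels = dists.argmin(axis=1)
    # Maximization: move each centroid to the mean of its assigned points.
    centroids = np.array([points[labels == k].mean(axis=0) for k in range(3)])
print(centroids)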
| 313
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE : str = ord(a__ )
if not _is_chinese_char(a__ ):
return 0
return 1
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ )
if chinese_word:
word_set.add(a__ )
SCREAMING_SNAKE_CASE : str = list(a__ )
return word_list
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE : Tuple = bert_tokens
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ )
while start < end:
SCREAMING_SNAKE_CASE : Dict = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ )
for i in range(a__ , 1 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j]
SCREAMING_SNAKE_CASE : List[str] = start + i
SCREAMING_SNAKE_CASE : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(a__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
# save chinese tokens' pos
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
a__ : int = parser.parse_args()
main(args)
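# A hedged sketch of how the saved ref file is consumed downstream, assuming
# `DataCollatorForWholeWordMask` from `transformers`: the `chinese_ref` field
# marks word-internal sub-token positions so whole words are masked together.
# Paths and tokenizer match the argparse defaults above.
import json
from transformers import BertTokenizer, DataCollatorForWholeWordMask
tokenizer = BertTokenizer.from_pretrained("./resources/robert")
with open("./resources/chinese-demo.txt", encoding="utf-8") as f:
    lines = [line.strip() for line in f if line.strip()]
with open("./resources/ref.txt", encoding="utf-8") as f:
    refs = [json.loads(line) for line in f]
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
features = [
    {"input_ids": tokenizer(line, truncation=True, max_length=512)["input_ids"], "chinese_ref": ref}
    for line, ref in zip(lines, refs)
]
batch = collator(features)
print(batch["input_ids"].shape, batch["labels"].shape)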
| 313
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a__ , )
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = RobertaConfig
__SCREAMING_SNAKE_CASE : Optional[int] = 'roberta'
def __init__( self , _lowerCamelCase ) ->Tuple:
super().__init__(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = RobertaEmbeddings(_lowerCamelCase )
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a__ , )
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = RobertaConfig
__SCREAMING_SNAKE_CASE : List[Any] = 'roberta'
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
super().__init__(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = config.num_labels
SCREAMING_SNAKE_CASE : Any = config.num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = DeeRobertaModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE : Dict = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=-1 , _lowerCamelCase=False , ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = self.num_layers
try:
SCREAMING_SNAKE_CASE : Optional[int] = self.roberta(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , position_ids=_lowerCamelCase , head_mask=_lowerCamelCase , inputs_embeds=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : str = outputs[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dropout(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.classifier(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE : Union[str, Any] = e.message
SCREAMING_SNAKE_CASE : Optional[Any] = e.exit_layer
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE : Optional[int] = entropy(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : Dict = MSELoss()
SCREAMING_SNAKE_CASE : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : str = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
SCREAMING_SNAKE_CASE : Any = []
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE : str = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : List[Any] = MSELoss()
SCREAMING_SNAKE_CASE : Dict = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : Dict = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowerCamelCase )
if train_highway:
SCREAMING_SNAKE_CASE : str = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE : List[str] = (loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE : str = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE : Any = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
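# A small sketch of the entropy-based early-exit rule the highway heads enable
# at inference time, mirroring the `entropy` helper imported above; a layer's
# prediction is accepted as soon as its softmax entropy drops below a budget.
# The per-layer logits here are random stand-ins.
import torch
def exit_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)
layer_logits = [torch.randn(1, 2) for _ in range(12)]  # hypothetical highway logits
budget = 0.3
for layer, logits in enumerate(layer_logits):
    if exit_entropy(logits).item() < budget:
        print(f"exit at layer {layer} with prediction {logits.argmax(-1).item()}")
        break
else:
    print("ran all layers:", layer_logits[-1].argmax(-1).item())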
| 313
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Tuple = '''1'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Dict = '''1'''
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = '''alsa'''
SCREAMING_SNAKE_CASE : Any = '''default'''
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation'''
SCREAMING_SNAKE_CASE : Optional[int] = ''':0'''
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = '''dshow'''
SCREAMING_SNAKE_CASE : Any = '''default'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Any = np.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = b''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
SCREAMING_SNAKE_CASE : List[str] = False
yield item
SCREAMING_SNAKE_CASE : Dict = stride_left
SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
yield item
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : str = 2**24 # 16 MB
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
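# A toy illustration of the striding generator defined above (upstream name
# `chunk_bytes_iter`), assuming a byte stream arriving in small pieces: each
# emitted chunk overlaps its neighbours by (stride_left, stride_right) bytes
# so a downstream consumer can discard edge effects.
def byte_source():
    yield from (b"abcd", b"efgh", b"ijkl")
for item in chunk_bytes_iter(byte_source(), 8, stride=(2, 2)):
    print(item["raw"], item["stride"])
# b'abcdefgh' (0, 2)
# b'efghijkl' (2, 2)
# b'ijkl' (2, 0)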
| 313
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = num_stages
SCREAMING_SNAKE_CASE : int = hidden_sizes
SCREAMING_SNAKE_CASE : str = depths
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : int = use_labels
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Any = out_features
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : int = scope
SCREAMING_SNAKE_CASE : List[str] = num_stages
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Dict:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCAmelCase ( self ) ->List[str]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = UperNetForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[str] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : int = False
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = UperNetModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Optional[Any]:
return
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Dict = _config_zero_init(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
SCREAMING_SNAKE_CASE : Tuple = Image.open(a__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Any = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
SCREAMING_SNAKE_CASE : int = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Dict = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 313
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
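# Hedged usage note: because of the `_LazyModule` registration above, the heavy
# `modeling_wavlm` submodule is only imported when one of its symbols is first
# accessed. A minimal sketch, assuming `transformers` is installed:
from transformers import WavLMConfig
print(WavLMConfig().model_type)  # "wavlm"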
| 313
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase_( a__ , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
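# Illustration (toy dict, hypothetical keys) of how the (old, new) pairs built
# above are consumed: pop the old key and re-insert its value under the new name.
state = {"module.cls_token": 1, "module.pos_embed": 2}
for src, dest in [("module.cls_token", "vit.embeddings.cls_token")]:
    state[dest] = state.pop(src)
print(sorted(state))  # ['module.pos_embed', 'vit.embeddings.cls_token']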
def UpperCAmelCase_( a__ , a__ , a__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Any = ''''''
else:
SCREAMING_SNAKE_CASE : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :]
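# Sanity sketch of the fused-QKV split above, with toy sizes rather than the
# real checkpoint: a (3*hidden, hidden) weight slices into three equal blocks.
import torch
hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)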
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = val
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ViTMSNConfig()
SCREAMING_SNAKE_CASE : Optional[int] = 1_000
SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) )
SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 384
SCREAMING_SNAKE_CASE : Any = 1_536
SCREAMING_SNAKE_CASE : List[str] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = 1_024
SCREAMING_SNAKE_CASE : Optional[int] = 4_096
SCREAMING_SNAKE_CASE : Tuple = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : List[Any] = 4_096
SCREAMING_SNAKE_CASE : List[Any] = 24
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder''']
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(a__ )
SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , base_model=a__ )
model.load_state_dict(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw )
SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=a__ , image_std=a__ )
SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Tuple = model(**a__ )
SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr'
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : str = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : int = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[str] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : str = num_feature_levels
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points
SCREAMING_SNAKE_CASE : Any = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : int = class_cost
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def __lowerCAmelCase ( self ) ->int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) ->int:
return self.d_model
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
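# Hedged usage sketch of the config above: `hidden_size` resolves through the
# `attribute_map` to `d_model` (assumes `transformers` exposes DeformableDetrConfig).
from transformers import DeformableDetrConfig
cfg = DeformableDetrConfig()
print(cfg.hidden_size == cfg.d_model)  # True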
| 313
| 1
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a__ : Any = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
a__ : Tuple = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
a__ : Union[str, Any] = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return float((preds == labels).mean() )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = simple_accuracy(a__ , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = float(fa_score(y_true=a__ , y_pred=a__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = np.array(a__ )
SCREAMING_SNAKE_CASE : int = np.array(a__ )
SCREAMING_SNAKE_CASE : int = en_sentvecs.shape[0]
# mean centering
SCREAMING_SNAKE_CASE : int = en_sentvecs - np.mean(a__ , axis=0 )
SCREAMING_SNAKE_CASE : List[Any] = in_sentvecs - np.mean(a__ , axis=0 )
SCREAMING_SNAKE_CASE : int = cdist(a__ , a__ , '''cosine''' )
SCREAMING_SNAKE_CASE : Optional[int] = np.array(range(a__ ) )
SCREAMING_SNAKE_CASE : Tuple = sim.argsort(axis=1 )[:, :10]
SCREAMING_SNAKE_CASE : Optional[int] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
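# Self-contained sanity check of the precision@10 logic above (illustrative
# only): with identical, mean-centred vectors every query's ten nearest
# neighbours include its own index, so the score is 1.0.
import numpy as np
from scipy.spatial.distance import cdist
vecs = np.random.default_rng(0).normal(size=(12, 3)).astype(np.float32)
centred = vecs - vecs.mean(axis=0)
sim = cdist(centred, centred, "cosine")
preds = sim.argsort(axis=1)[:, :10]
actual = np.arange(12)
print(float(np.any(preds == actual[:, None], axis=1).mean()))  # 1.0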
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Dict:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(_lowerCamelCase , _lowerCamelCase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(_lowerCamelCase , _lowerCamelCase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 313
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = 10
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCamelCase )
return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
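# Hedged, minimal version of the denoising loop exercised by these tests; the
# UNet is replaced with a zero "model" so the snippet stays self-contained.
import torch
from diffusers import EulerDiscreteScheduler
sched = EulerDiscreteScheduler(num_train_timesteps=1000)
sched.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
for t in sched.timesteps:
    model_in = sched.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_in)  # stand-in for a real UNet
    sample = sched.step(noise_pred, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])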
| 313
| 1
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE : str = ord(a__ )
if not _is_chinese_char(a__ ):
return 0
return 1
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ )
if chinese_word:
word_set.add(a__ )
SCREAMING_SNAKE_CASE : str = list(a__ )
return word_list
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE : Tuple = bert_tokens
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ )
while start < end:
SCREAMING_SNAKE_CASE : Dict = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ )
for i in range(a__ , 1 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j]
SCREAMING_SNAKE_CASE : List[str] = start + i
SCREAMING_SNAKE_CASE : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
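# Toy, self-contained re-implementation of the longest-match marking above
# (the helper name `mark_whole_words` is invented for illustration): inner
# characters of a known Chinese word receive a "##" prefix.
def mark_whole_words(bert_tokens, chinese_words):
    out, start, end = list(bert_tokens), 0, len(bert_tokens)
    max_len = max((len(w) for w in chinese_words), default=0)
    while start < end:
        matched = False
        for i in range(min(end - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out
print(mark_whole_words(["中", "国", "人"], {"中国"}))  # ['中', '##国', '人']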
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
        # We only save the positions of Chinese subwords starting with "##", which means they are part of a whole word.
for i, token in enumerate(a__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
# save chinese tokens' pos
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp )  # faster on a GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
a__ : int = parser.parse_args()
main(args)
| 313
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
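# Illustration of the global-attention padding above (toy dict): padded
# positions get -1 ("do not attend"), while 0 means local and 1 global attention.
encoded = {"input_ids": [5, 6, 7, 0, 0], "global_attention_mask": [1, 0, 0]}
difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
print(encoded["global_attention_mask"])  # [1, 0, 0, -1, -1]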
| 313
| 1
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
a__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Optional[int]:
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE : Optional[Any] = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _lowerCamelCase , standard_warn=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = FrozenDict(_lowerCamelCase )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE : str = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _lowerCamelCase , standard_warn=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = FrozenDict(_lowerCamelCase )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_lowerCamelCase , segmentation_processor=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , )
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
self.enable_attention_slicing(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
SCREAMING_SNAKE_CASE : int = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCamelCase , _lowerCamelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self ) ->List[str]:
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
SCREAMING_SNAKE_CASE : Any = self.segmentation_model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE : List[str] = self.numpy_to_pil(_lowerCamelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE : str = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , )
| 313
|
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Any = matrix_length // 2
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : Optional[int] = [
[a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ )
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return len(a__ ), len(matrix[0] )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
print('''\n'''.join(str(a__ ) for line in matrix ) )
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible dimensions via Strassen's algorithm,
    padding to the nearest power-of-two size and trimming the result."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Copy the rows so that padding does not mutate the caller's matrices.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
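

# A small verification sketch added for illustration (not part of the original
# script): `naive_multiply` is a hypothetical helper defined here only to
# cross-check strassen() against the schoolbook algorithm on the demo matrices.
def naive_multiply(x: list, y: list) -> list:
    return [
        [sum(x[i][k] * y[k][j] for k in range(len(y))) for j in range(len(y[0]))]
        for i in range(len(x))
    ]


if __name__ == "__main__":
    assert strassen(matrix1, matrix2) == naive_multiply(matrix1, matrix2)
    print("strassen matches the naive multiplication")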
| 313
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
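
# Illustrative usage sketch (added; not part of the original __init__): when the
# version guard above passes, the real pipeline classes are importable from the
# package root. The checkpoint id "shi-labs/versatile-diffusion" refers to the
# published weights; treat the exact call below as a sketch, not canonical API docs.
#
#     from diffusers import VersatileDiffusionPipeline
#     pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#     image = pipe.text_to_image("an astronaut riding a horse").images[0]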
| 313
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """A thin wrapper around a learning-rate scheduler that only steps it when the
    wrapped optimizers actually performed a step (e.g. one not skipped by a grad scaler)."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
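

# Minimal usage sketch (added for illustration; not part of the original module).
# It wraps a plain PyTorch StepLR. `step_with_optimizer=False` is used so the sketch
# does not depend on Accelerate-prepared optimizers (which expose `step_was_skipped`).
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(2))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    wrapped = AcceleratedScheduler(
        torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5),
        optimizer,
        step_with_optimizer=False,
    )
    for _ in range(4):
        optimizer.step()
        wrapped.step()
    print(wrapped.get_last_lr())  # the learning rate halves every 2 steps -> [0.025]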
| 313
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
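
# Illustrative note (added; not part of the original __init__): the _LazyModule
# registered above defers importing the heavy torch modeling code until an attribute
# is first accessed, e.g.:
#
#     from transformers import WavLMModel
#     model = WavLMModel.from_pretrained("microsoft/wavlm-base")  # published checkpoint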
| 313
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    """Map a TF Pegasus variable name onto the matching Hugging Face state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
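
# Illustrative note (added; not part of the original script): PATTERNS is applied as
# ordered, plain substring replacements, so earlier rules can feed later ones. For a
# made-up TF-style key (not taken from a real checkpoint):
#
#     rename_state_dict_key("encoder/ffn.dense.kernel")  # -> "encoder.fc1.weight"
#     ("/" -> ".", then "ffn.dense." -> "fc1.", then "kernel" -> "weight")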
| 313
| 1
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=[0, 1, 2, 3] , ) ->Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = 100
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Any = patch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Any = scope
SCREAMING_SNAKE_CASE : Any = out_indices
SCREAMING_SNAKE_CASE : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Any = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Optional[Any] = num_patches + 1
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = BeitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = BeitForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = BeitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = BeitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = BeitForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = BeitModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self ) ->Any:
pass
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_lowerCamelCase ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_lowerCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __lowerCAmelCase ( self ) ->int:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = BeitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=_lowerCamelCase , bool_masked_pos=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowerCamelCase , atol=1e-2 ) )
@slow
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : int = torch.Size((1, 1000) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE : Optional[int] = 281
self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE : Optional[int] = 2396
self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
SCREAMING_SNAKE_CASE : Any = model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = BeitImageProcessor(do_resize=_lowerCamelCase , size=640 , do_center_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE : Any = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _lowerCamelCase )
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=_lowerCamelCase , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
SCREAMING_SNAKE_CASE : Dict = model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = BeitImageProcessor(do_resize=_lowerCamelCase , size=640 , do_center_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE : str = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE : Any = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
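

# Illustrative end-to-end sketch (added; not part of the original tests) mirroring the
# integration tests above: classify one image with the fine-tuned checkpoint the tests
# already reference. The tests expect class index 281 (a tabby cat) for the COCO cats
# fixture returned by prepare_img().
if __name__ == "__main__":
    from transformers import BeitForImageClassification, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])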
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
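

# Illustrative two-stage sketch (added; not part of the original tests) mirroring the
# slow test above: the prior pipeline maps text to CLIP image embeddings, which the
# img2img pipeline (under the name imported at the top of this file) uses to repaint
# an init image. Checkpoint ids and the init-image URL are taken from the test itself.
if __name__ == "__main__":
    prior = KandinskyPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipe = KandinskyImgaImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
    )
    image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
    frog = pipe(
        "A red cartoon frog, 4k",
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        strength=0.2,
    ).images[0]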
| 313
| 1
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
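

# Standalone sketch of the sampling loop exercised above (added; not part of the
# original tests): a toy model that predicts zeros stands in for a real UNet, so only
# the loop wiring -- scale_model_input -> model -> step -> prev_sample -- is shown.
if __name__ == "__main__":
    sched = EulerDiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    sched.set_timesteps(10)
    latents = torch.randn(1, 4, 8, 8) * sched.init_noise_sigma
    for t in sched.timesteps:
        model_input = sched.scale_model_input(latents, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet prediction
        latents = sched.step(noise_pred, t, latents).prev_sample
    print(latents.shape)  # torch.Size([1, 4, 8, 8])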
| 313
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised MSN pre-training,
    # so it is dropped before loading the backbone weights
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
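
# Illustrative reload sketch (added; not part of the original script): once converted,
# the folder passed as --pytorch_dump_folder_path can be loaded back for feature
# extraction. "./vit-msn-s16" is a made-up placeholder path.
#
#     from transformers import ViTImageProcessor, ViTMSNModel
#     processor = ViTImageProcessor.from_pretrained("./vit-msn-s16")
#     model = ViTMSNModel.from_pretrained("./vit-msn-s16")
#     features = model(**processor(images=image, return_tensors="pt")).last_hidden_state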
| 313
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : int = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase_( a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
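

# Illustrative note (added): with pad_token_id == 1 and input_ids [[5, 6, 1]], the
# helper above yields attention_mask [[1, 1, 0]]; note that the returned dict reuses
# the encoder attention_mask under the "decoder_attention_mask" key.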
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=0.0_2 , ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
SCREAMING_SNAKE_CASE : Tuple = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=True , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
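    # The invariant asserted by the two checks above: decoding token-by-token with
    # `past_key_values` must reproduce the logits of a single full-sequence decode
    # (up to numerical noise), which is why `diff` has to stay below 1e-3.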
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
    """Standalone checks for the LM head and for `shift_tokens_right`."""
    vocab_size = 99
    def _get_config_and_data( self ):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
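    # Worked example of the behaviour checked above (pad_token_id=1 and
    # decoder_start_token_id=2, as passed to shift_tokens_right):
    #   input:   [71, 82, 18, 33,  2,  1,  1]
    #   shifted: [ 2, 71, 82, 18, 33,  2,  1]
    # One trailing pad is consumed, hence n_pad_after == n_pad_before - 1.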
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """Runs the common Flax model and generation tests against the Blenderbot models."""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotModelTester(self )

    def test_use_cache_forward( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # the model expects the eos token to be present in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        generate_kwargs = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        tokenizer_kwargs = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        src_text = ['''Sam''']
        model_inputs = tokenizer(src_text , return_tensors='''jax''' )
        generated_ids = model.generate(**model_inputs , **generate_kwargs )
        tgt_text = '''Sam is a great name. It means "sun" in Gaelic.'''
        generated_txt = tokenizer.batch_decode(generated_ids , **tokenizer_kwargs )
        assert generated_txt[0].strip() == tgt_text
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets( screen_name ):
    """Downloads a user's timeline (up to the ~3200 most recent tweets) and writes it to a CSV file."""
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
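# A shorter, roughly equivalent pagination sketch using tweepy's Cursor helper
# (hypothetical alternative; Cursor handles the max_id bookkeeping internally):
#
#   alltweets = [
#       tweet
#       for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
#   ]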
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests( unittest.TestCase ):
    """Tests for the text-generation pipeline on both the PyTorch and TensorFlow backends."""
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE : Optional[Any] = text_generator('''This is a test''' , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
SCREAMING_SNAKE_CASE : int = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_lowerCamelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
SCREAMING_SNAKE_CASE : Tuple = text_generator('''This is a test''' , do_sample=_lowerCamelCase , num_return_sequences=2 , return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{'''generated_token_ids''': ANY(_lowerCamelCase )},
{'''generated_token_ids''': ANY(_lowerCamelCase )},
] , )
SCREAMING_SNAKE_CASE : Tuple = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = '''<pad>'''
SCREAMING_SNAKE_CASE : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowerCamelCase , )
self.assertEqual(
_lowerCamelCase , [
[
{'''generated_token_ids''': ANY(_lowerCamelCase )},
{'''generated_token_ids''': ANY(_lowerCamelCase )},
],
[
{'''generated_token_ids''': ANY(_lowerCamelCase )},
{'''generated_token_ids''': ANY(_lowerCamelCase )},
],
] , )
@require_tf
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE : Tuple = text_generator('''This is a test''' , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
SCREAMING_SNAKE_CASE : Tuple = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def get_test_pipeline( self , model , tokenizer , processor ):
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria( self ):
        prompt = '''Hello I believe in'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        output = text_generator(prompt )
        self.assertEqual(
            output , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        output = text_generator(prompt , stop_sequence=''' fe''' )
        self.assertEqual(output , [{'''generated_text''': '''Hello I believe in fe'''}] )
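    # i.e. generation is cut at the first occurrence of the stop sequence: with
    # stop_sequence=" fe", the repeated "... fe fe fe" tail collapses to a single " fe".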
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = text_generator.model
SCREAMING_SNAKE_CASE : Tuple = text_generator.tokenizer
SCREAMING_SNAKE_CASE : Tuple = text_generator('''This is a test''' )
self.assertEqual(_lowerCamelCase , [{'''generated_text''': ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
SCREAMING_SNAKE_CASE : List[Any] = text_generator('''This is a test''' , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{'''generated_text''': ANY(_lowerCamelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
SCREAMING_SNAKE_CASE : Tuple = pipeline(task='''text-generation''' , model=_lowerCamelCase , tokenizer=_lowerCamelCase , return_full_text=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = text_generator('''This is a test''' )
self.assertEqual(_lowerCamelCase , [{'''generated_text''': ANY(_lowerCamelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator('''This is a test''' , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{'''generated_text''': ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE : List[Any] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
] , )
with self.assertRaises(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : int = text_generator('''test''' , return_full_text=_lowerCamelCase , return_text=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = text_generator('''test''' , return_full_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = text_generator('''test''' , return_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
        # An empty prompt is slightly special: it requires a BOS token to exist.
        # Pegasus is a special case, since it always appends an EOS token, so it
        # works even without a BOS token.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE : Optional[int] = text_generator('''''' )
self.assertEqual(_lowerCamelCase , [{'''generated_text''': ANY(_lowerCamelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE : Optional[Any] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
        # We don't care about infinite-range models; they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings
        # that are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
            tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
SCREAMING_SNAKE_CASE : str = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_lowerCamelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
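        # The "hole" strategy truncates the prompt from the left so that
        # prompt + max_new_tokens fits within model_max_length; it therefore cannot
        # help once max_new_tokens alone exceeds the model's maximum length, which is
        # exactly what the assertRaises above verifies.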
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self ) ->Optional[Any]:
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
SCREAMING_SNAKE_CASE : Any = pipe('''This is a test''' )
self.assertEqual(
_lowerCamelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
SCREAMING_SNAKE_CASE : Any = pipe('''This is a test''' )
self.assertEqual(
_lowerCamelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe('''This is a test''' )
self.assertEqual(
_lowerCamelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __lowerCAmelCase ( self ) ->Union[str, Any]:
import torch
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self ) ->Tuple:
import torch
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
pipe('''This is a test''' , do_sample=_lowerCamelCase , top_p=0.5 )
    def test_max_new_tokens_warning( self ):
        prompt = '''Hello world'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            logger = logging.get_logger('''transformers.generation.utils''' )
        logger_msg = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig( PretrainedConfig ):
    """Configuration class for I-BERT (integer-only BERT) models."""

    model_type = '''ibert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type='''absolute''' , quant_mode=False , force_dequant='''none''' , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
    """ONNX export configuration for I-BERT."""
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
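# Minimal usage sketch (hypothetical, not part of the original module):
#
#   config = IBertConfig(quant_mode=True)
#   onnx_config = IBertOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes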
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
    """Adds Levit-specific attribute checks to the common config tests."""
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class LevitModelTester:
    """Builds tiny Levit configs and inputs for the common tests."""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=64 , _lowerCamelCase=3 , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=16 , _lowerCamelCase=[128, 256, 384] , _lowerCamelCase=[4, 6, 8] , _lowerCamelCase=[2, 3, 4] , _lowerCamelCase=[16, 16, 16] , _lowerCamelCase=0 , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=2 , ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = stride
SCREAMING_SNAKE_CASE : int = padding
SCREAMING_SNAKE_CASE : Optional[int] = hidden_sizes
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = depths
SCREAMING_SNAKE_CASE : int = key_dim
SCREAMING_SNAKE_CASE : Any = drop_path_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Dict = attention_ratio
SCREAMING_SNAKE_CASE : int = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
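    # Worked example of the shape arithmetic above for this tester's defaults
    # (image_size=64, kernel_size=3, stride=2, padding=1): each of the four
    # patch-embedding convolutions maps H -> floor((H + 2*1 - 3) / 2 + 1), so
    # 64 -> 32 -> 16 -> 8 -> 4; the ceil(h/4) * ceil(w/4) factor then accounts
    # for the further downsampling of this configuration's Subsample stages.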
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Runs the common model and pipeline tests against the Levit models."""
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': LevitModel,
            '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    # common-test feature flags
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = LevitModelTester(self )
        self.config_tester = LevitConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
def __lowerCAmelCase ( self ) ->List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Any:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Tuple = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Tuple = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = problem_type['''title''']
SCREAMING_SNAKE_CASE : Optional[Any] = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : List[Any] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE : Optional[int] = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This checks that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which would be a symptom that something is wrong with the regression
                    # problem type. See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
    def test_model_from_pretrained( self ):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """Loads the COCO sample image used by the integration test."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest( unittest.TestCase ):
    """Slow integration tests against a pretrained Levit checkpoint."""
    @cached_property
    def default_image_processor( self ):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head( self ):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig( PretrainedConfig ):
    """Configuration class for ImageGPT models."""
    model_type = '''imagegpt'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function='''quick_gelu''' , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
    """ONNX export configuration for ImageGPT."""
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) ->Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
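# Usage sketch (hypothetical; assumes a matching image processor instance):
#
#   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#   dummy = onnx_config.generate_dummy_inputs(image_processor, framework=TensorType.PYTORCH)
#   # -> a dict such as {"input_ids": <clustered pixel ids>}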
def reverse_words( input_str ):
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
from maths.prime_check import is_prime
def twin_prime( number ):
    """
    Returns number + 2 if both `number` and `number + 2` are prime, and -1 otherwise.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
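# Example: enumerating the twin-prime pairs below 50 with the function above.
#
#   for n in range(2, 50):
#       if twin_prime(n) != -1:
#           print((n, twin_prime(n)))
#   # (3, 5), (5, 7), (11, 13), (17, 19), (29, 31), (41, 43)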
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    """Finds the text between `start_prompt` and `end_prompt`, keeping track of the line indices."""
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a__ : str = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide ):
    """Returns the markdown list of model links for a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    """Checks the list of models in a task guide and optionally rewrites it in place."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                ''' to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    """Fast checks for the Kandinsky 2.2 controlnet img2img pipeline."""
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def time_input_dim( self ):
        return 32

    @property
    def block_out_channels_a( self ):
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        return 100
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            '''in_channels''': 8,
            # Out channels is double the in channels because the model predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
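# The slow test above exercises the two-stage Kandinsky 2.2 flow: a prior pipeline
# turns the prompt (and, at the given strength, the init image) into image
# embeddings, and the ControlNet img2img decoder denoises conditioned on those
# embeddings plus the depth hint. Below is a minimal standalone sketch of that
# flow; it assumes the obfuscated names above correspond to diffusers' public
# KandinskyV22PriorEmb2EmbPipeline / KandinskyV22ControlnetImg2ImgPipeline, and
# that `init_image` (PIL) and `hint` (a 1x3xHxW depth tensor) are prepared as in
# the test.
def _kandinsky_controlnet_img2img_sketch(init_image, hint, device="cuda"):
    import torch
    from diffusers import KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline

    prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa
    ).to(device)
    decoder = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.floataa
    ).to(device)
    # Stage 1: prompt (+ init image) -> image embeddings
    image_embeds, negative_image_embeds = prior(
        "A robot, 4k photo", image=init_image, strength=0.85, negative_prompt=""
    ).to_tuple()
    # Stage 2: embeddings + depth hint + init image -> final image
    return decoder(
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        hint=hint,
        num_inference_steps=100,
        height=512,
        width=512,
        strength=0.5,
    ).images[0]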
| 313
| 1
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
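# For context, the `deprecate` call above only needs to fire once, at import time
# of the old module path. A minimal hand-rolled equivalent of such an import shim
# (a sketch using the standard library, not diffusers' actual `deprecate` helper)
# looks like this:
import warnings

def _warn_moved_module(old_path, new_path, removed_in):
    # stacklevel=3 points the warning at the user's import statement rather than
    # at this shim module.
    warnings.warn(
        f"Importing from `{old_path}` is deprecated and will be removed in "
        f"{removed_in}; import from `{new_path}` instead.",
        FutureWarning,
        stacklevel=3,
    )

_warn_moved_module("diffusers.pipeline_utils", "diffusers.pipelines.pipeline_utils", "0.22.0")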
| 313
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
        # Check that height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
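# This comparison pipeline is meant to be loaded as a diffusers community
# pipeline rather than instantiated by hand. A hedged usage sketch follows; the
# `custom_pipeline` id `stable_diffusion_comparison` is an assumption, since the
# file's published name is not shown here.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.floataa,
)
pipe.enable_attention_slicing()
output = pipe(prompt="an astronaut riding a horse on mars", height=512, width=512)
# output.images holds four images, one per checkpoint v1.1 through v1.4.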
| 313
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__=2 , a__=3 , a__=16 , a__ = 10 , a__ = 2 ):
"""simple docstring"""
def get_dataset(a__ ):
SCREAMING_SNAKE_CASE : Tuple = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(a__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE : Optional[int] = get_dataset(a__ )
SCREAMING_SNAKE_CASE : Dict = get_dataset(a__ )
SCREAMING_SNAKE_CASE : List[Any] = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
SCREAMING_SNAKE_CASE : Tuple = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for epoch in range(a__ ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = batch
SCREAMING_SNAKE_CASE : Optional[int] = model(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.nn.functional.mse_loss(a__ , a__ )
accelerator.backward(a__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
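# Note on the helper above: the values it collects from `random.random()` act as
# a fingerprint of Python's RNG state. `Accelerator.save_state` checkpoints the
# random states alongside model/optimizer state, so training resumed from a
# checkpoint must reproduce exactly the same sequence of these values, which is
# what the tests below assert. The core idea in isolation:
def _rng_fingerprint_demo():
    import random as _random

    state = _random.getstate()  # analogous to what save_state records
    first = [_random.random() for _ in range(3)]
    _random.setstate(state)  # analogous to load_state restoring it
    assert first == [_random.random() for _ in range(3)]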
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self ) ->Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
return x * self.a + self.b
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel()
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = dummy_dataloaders()
SCREAMING_SNAKE_CASE : List[str] = ProjectConfiguration(total_limit=1 , project_dir=_lowerCamelCase , automatic_checkpoint_naming=_lowerCamelCase )
# Train baseline
SCREAMING_SNAKE_CASE : Dict = Accelerator(project_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) ->Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE : List[Any] = DummyModel()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE : int = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
SCREAMING_SNAKE_CASE : int = os.path.join(_lowerCamelCase , '''initial''' )
accelerator.save_state(_lowerCamelCase )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : str = optimizer.state_dict()
SCREAMING_SNAKE_CASE : Optional[Any] = train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : Dict = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE : List[str] = DummyModel()
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = dummy_dataloaders()
SCREAMING_SNAKE_CASE : Any = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.load_state(_lowerCamelCase )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : Tuple = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : int = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = train(2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save everything
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowerCamelCase , '''checkpoint''' )
accelerator.save_state(_lowerCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_lowerCamelCase )
test_rands += train(1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : Optional[int] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE : Any = DummyModel()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dummy_dataloaders()
SCREAMING_SNAKE_CASE : str = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase )
# Train baseline
SCREAMING_SNAKE_CASE : List[Any] = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : List[str] = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : Union[str, Any] = optimizer.state_dict()
SCREAMING_SNAKE_CASE : Optional[Any] = train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : str = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : str = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE : Dict = DummyModel()
SCREAMING_SNAKE_CASE : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = dummy_dataloaders()
SCREAMING_SNAKE_CASE : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.load_state(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : Tuple = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : Optional[Any] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = train(2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE : Optional[int] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE : Optional[int] = DummyModel()
SCREAMING_SNAKE_CASE : str = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE : Optional[int] = Accelerator()
with self.assertRaises(_lowerCamelCase ) as ve:
accelerator.register_for_checkpointing(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : int = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) ->str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel()
SCREAMING_SNAKE_CASE : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE : Any = torch.optim.lr_scheduler.StepLR(_lowerCamelCase , step_size=1 , gamma=0.9_9 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dummy_dataloaders()
SCREAMING_SNAKE_CASE : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase )
# Train baseline
SCREAMING_SNAKE_CASE : Any = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE : Any = scheduler.state_dict()
train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_lowerCamelCase , scheduler.state_dict() )
def __lowerCAmelCase ( self ) ->str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE : str = DummyModel()
SCREAMING_SNAKE_CASE : Any = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE : Dict = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = accelerator.prepare(_lowerCamelCase )
            # Save 11 states; with total_limit=2 only the last two should survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : int = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
a__ : Any = '''/tmp/accelerate/state_checkpointing'''
a__ : Dict = DummyModel()
a__ : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
a__ : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
a__ , a__ : Optional[Any] = dummy_dataloaders()
a__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a__ : Tuple = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a__ , a__ , a__ , a__ , a__ : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a__ , a__ : List[str] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a__ : Dict = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
a__ : Optional[int] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
a__ : Any = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
a__ : List[str] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
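# Distilled, the checkpointing pattern this script and the tests above rely on
# is three calls: prepare, save_state, load_state. A minimal runnable sketch
# with automatic checkpoint naming (paths under `project_dir` follow the
# `checkpoints/checkpoint_<n>` convention checked above):
def _checkpoint_roundtrip_demo(tmpdir):
    import torch
    from accelerate import Accelerator
    from accelerate.utils import ProjectConfiguration

    demo_model = torch.nn.Linear(1, 1)
    demo_optimizer = torch.optim.Adam(demo_model.parameters(), lr=1e-3)
    config = ProjectConfiguration(project_dir=tmpdir, automatic_checkpoint_naming=True)
    demo_accelerator = Accelerator(project_config=config)
    demo_model, demo_optimizer = demo_accelerator.prepare(demo_model, demo_optimizer)
    demo_accelerator.save_state()  # writes <tmpdir>/checkpoints/checkpoint_0
    demo_accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))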
| 313
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
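# A hedged sketch of driving the Flax UNet defined above: construct the module,
# materialize parameters, then run one denoising step via `apply`. It assumes
# the obfuscated names map to diffusers' FlaxUNet2DConditionModel and its
# `init_weights(rng)` method; tensor shapes mirror the dummy inputs used during
# initialization above (NCHW sample, int32 timesteps, and
# (batch, seq, cross_attention_dim) encoder hidden states).
def _flax_unet_demo():
    import jax
    import jax.numpy as jnp
    from diffusers import FlaxUNetaDConditionModel

    unet = FlaxUNetaDConditionModel(
        sample_size=32,
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
        block_out_channels=(32, 64),
        layers_per_block=1,
        attention_head_dim=8,
        cross_attention_dim=32,
    )
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.floataa)
    timesteps = jnp.ones((1,), dtype=jnp.intaa)
    encoder_hidden_states = jnp.zeros((1, 1, 32), dtype=jnp.floataa)
    out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    return out.sample  # same spatial shape as the input: (1, 4, 32, 32)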
| 313
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'switch_transformers'
__SCREAMING_SNAKE_CASE : List[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : str = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , _lowerCamelCase=3_2128 , _lowerCamelCase=768 , _lowerCamelCase=64 , _lowerCamelCase=2048 , _lowerCamelCase=64 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=12 , _lowerCamelCase=8 , _lowerCamelCase=False , _lowerCamelCase=0.0_1 , _lowerCamelCase="float32" , _lowerCamelCase=False , _lowerCamelCase=32 , _lowerCamelCase=128 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-6 , _lowerCamelCase=0.0_0_1 , _lowerCamelCase=0.0_0_1 , _lowerCamelCase=1.0 , _lowerCamelCase="relu" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , **_lowerCamelCase , ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : int = d_model
SCREAMING_SNAKE_CASE : Union[str, Any] = d_kv
SCREAMING_SNAKE_CASE : str = d_ff
SCREAMING_SNAKE_CASE : Dict = num_sparse_encoder_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # Every (num_layers // num_sparse_encoder_layers)-th encoder layer is a sparse layer.
if self.num_sparse_encoder_layers > 0:
SCREAMING_SNAKE_CASE : Any = self.num_layers // self.num_sparse_encoder_layers
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers # HACK: this will create 0 sparse layers
        # Every (num_decoder_layers // num_sparse_decoder_layers)-th decoder layer is a sparse layer.
if self.num_sparse_decoder_layers > 0:
SCREAMING_SNAKE_CASE : int = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
SCREAMING_SNAKE_CASE : Any = self.num_decoder_layers # HACK: this will create 0 sparse layers
SCREAMING_SNAKE_CASE : Tuple = num_heads
SCREAMING_SNAKE_CASE : Any = num_experts
SCREAMING_SNAKE_CASE : str = expert_capacity
SCREAMING_SNAKE_CASE : Dict = router_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype
SCREAMING_SNAKE_CASE : int = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention_num_buckets
SCREAMING_SNAKE_CASE : str = relative_attention_max_distance
SCREAMING_SNAKE_CASE : Dict = dropout_rate
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : List[Any] = initializer_factor
SCREAMING_SNAKE_CASE : Optional[Any] = feed_forward_proj
SCREAMING_SNAKE_CASE : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE : Dict = add_router_probs
SCREAMING_SNAKE_CASE : Tuple = router_z_loss_coef
SCREAMING_SNAKE_CASE : Any = router_aux_loss_coef
SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
SCREAMING_SNAKE_CASE : Optional[Any] = act_info[-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = act_info[0] == '''gated'''
if len(_lowerCamelCase ) > 1 and act_info[0] != "gated" or len(_lowerCamelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE : str = '''gelu_new'''
super().__init__(
pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase , )
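# The sparse-step bookkeeping in `__init__` reduces to integer division. A quick
# sanity check, assuming the obfuscated class corresponds to transformers'
# SwitchTransformersConfig with its public `encoder_sparse_step` attribute:
from transformers import SwitchTransformersConfig

_cfg = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
assert _cfg.encoder_sparse_step == 4  # every 4th encoder layer is sparse
# With no sparse layers requested, the step equals num_layers, so no layer index
# ever matches -- the "HACK" branch above.
_cfg = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=0)
assert _cfg.encoder_sparse_step == 12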
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Tuple = frozenset([] )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL()
SCREAMING_SNAKE_CASE : Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if pil_image:
SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase )
inputs.update({'''image_embeds''': None} )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Dict = pipe(
_lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
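# All three slow tests lean on the same two memory levers: attention slicing
# chunks the attention computation, and sequential CPU offload keeps each
# submodule on CPU until the moment it is needed on GPU. A hedged sketch of
# applying them to the published pipeline class (the repo id is one example of
# a stable-unclip checkpoint, not the one used above):
import torch
from diffusers import StableUnCLIPImgaImgPipeline

def _load_memory_frugal_pipe():
    pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.floataa
    )
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()  # handles device placement; skip pipe.to("cuda")
    return pipe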
| 313
| 1
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a__ : Dict = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a__ : List[Any] = '''main'''
# Default branch name
a__ : Dict = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
a__ : List[Any] = '''aaaaaaa'''
# This commit does not exist, so we should 404.
a__ : List[Any] = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
a__ : Dict = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def UpperCAmelCase_( ):
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def UpperCAmelCase_( ):
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Any:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class a_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def __lowerCAmelCase ( self ) ->Tuple:
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] )
class a_ ( a__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
@require_tf
def __lowerCAmelCase ( self ) ->Any:
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] )
class a_ ( a__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] )
@require_flax
def __lowerCAmelCase ( self ) ->str:
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
class a_ ( FlaxBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(a_ ) , [] )
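# `find_labels` is imported from transformers higher up in this file. Below is
# a rough sketch of the idea -- an assumption for illustration, not the
# library's implementation: inspect the signature of the model's forward pass
# for label-like argument names. `find_labels_sketch` is a hypothetical name.
def find_labels_sketch(model_class):
    """Illustrative only; not the transformers helper."""
    import inspect

    # TF models define `call`, PyTorch models define `forward`, Flax uses `__call__`
    method = getattr(model_class, '''forward''', getattr(model_class, '''call''', model_class.__call__))
    return [
        name
        for name in inspect.signature(method).parameters
        if '''label''' in name or name in ('''start_positions''', '''end_positions''')
    ]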
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
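# A quick usage sketch, assuming the intended (de-obfuscated) method names of
# the class above: advance / update / remaining / reset. Token ids are hypothetical.
def _demo_phrasal_constraint():
    """Illustrative only; walks a PhrasalConstraint through its sequence."""
    constraint = PhrasalConstraint([5, 9, 2])
    assert constraint.advance() == 5  # the next token the beam must emit
    constraint.update(5)
    constraint.update(9)
    stepped, completed, reset = constraint.update(2)
    assert completed and constraint.remaining() == 0
    constraint.reset()  # back to the unfulfilled state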
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
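# A small sketch of the trie above, assuming its intended API: `next_tokens`
# returns the continuations of a prefix and `reached_leaf` reports whether a
# full alternative was consumed. The token ids are hypothetical.
def _demo_disjunctive_trie():
    """Illustrative only; not part of the library."""
    trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
    assert sorted(trie.next_tokens([1, 2])) == [3, 4]  # both branches still open
    assert trie.reached_leaf([1, 2, 3])  # one alternative fully consumed
    # DisjunctiveTrie([[1, 2], [1, 2, 3]]) would raise: the first list is a
    # complete subset of the second, which `no_subsets=True` forbids.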
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
# throughout this process, so the new state starts from their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
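# A hedged sketch of how decoding code might drive the bookkeeping class above,
# assuming the constraint classes behave as in transformers' constrained beam
# search. The token stream is hypothetical.
def _demo_constraint_list_state():
    """Illustrative only; not part of the library."""
    constraints = [PhrasalConstraint([5, 9]), DisjunctiveConstraint([[3, 4], [3, 7]])]
    state = ConstraintListState(constraints)
    for token_id in [5, 9, 3, 7]:  # a continuation that satisfies both constraints
        complete, stepped = state.add(token_id)
    assert complete  # every constraint fulfilled
    assert state.advance() is None  # nothing left to force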
| 313
| 1
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a__ : List[Any] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
a__ : Tuple = get_tests_dir('''fixtures/vocab.json''')
a__ : List[str] = get_tests_dir('''fixtures''')
class a_ ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = 0
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : int = WavaVecaConfig()
SCREAMING_SNAKE_CASE : Tuple = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = AutoProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
copyfile(_lowerCamelCase , os.path.join(_lowerCamelCase , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[str] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE : str = WavaVecaProcessor(_lowerCamelCase , _lowerCamelCase )
# save in new folder
processor.save_pretrained(_lowerCamelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE : int = json.load(_lowerCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , '''w''' ) as f:
f.write(json.dumps(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = AutoProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[str] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE : Tuple = WavaVecaProcessor(_lowerCamelCase , _lowerCamelCase )
# save in new folder
processor.save_pretrained(_lowerCamelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Any = json.load(_lowerCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , '''w''' ) as f:
f.write(json.dumps(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = AutoProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_lowerCamelCase )
# copy relevant files
copyfile(_lowerCamelCase , os.path.join(_lowerCamelCase , '''vocab.json''' ) )
# create an empty sample processor
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : int = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_lowerCamelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE : Optional[int] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self ) ->Any:
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
AutoFeatureExtractor.register(_lowerCamelCase , _lowerCamelCase )
AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase )
AutoProcessor.register(_lowerCamelCase , _lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoProcessor.register(_lowerCamelCase , _lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : List[str] = os.path.join(_lowerCamelCase , '''vocab.txt''' )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE : List[str] = CustomTokenizer(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = CustomProcessor(_lowerCamelCase , _lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = AutoProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) ->List[str]:
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = False
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'AutoFeatureExtractor'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : int = False
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
AutoFeatureExtractor.register(_lowerCamelCase , _lowerCamelCase )
AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase )
AutoProcessor.register(_lowerCamelCase , _lowerCamelCase )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE : str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE : List[Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __lowerCAmelCase ( cls ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __lowerCAmelCase ( cls ) ->int:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = WavaVecaProcessor.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_lowerCamelCase , '''test-processor''' ) , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(new_processor.feature_extractor , _lowerCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = WavaVecaProcessor.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_lowerCamelCase , '''test-processor-org''' ) , push_to_hub=_lowerCamelCase , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE : Any = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(new_processor.feature_extractor , _lowerCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) ->str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowerCamelCase , '''vocab.txt''' )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Dict = CustomTokenizer(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = CustomProcessor(_lowerCamelCase , _lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE : Optional[Any] = Repository(_lowerCamelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(_lowerCamelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_lowerCamelCase , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE : List[Any] = json.load(_lowerCamelCase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_lowerCamelCase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_lowerCamelCase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_lowerCamelCase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE : Tuple = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 313
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets(
a__ , a__ , number=a__ , min_len=1_026 , trim=a__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ )
print('''computing perplexity on objective set''' )
SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item()
print('''perplexity on objective set:''' , a__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ )
# Train secondary learner
SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner(
a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ )
SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ )
SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a__ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
for epoch in range(int(a__ ) ):
for step, example in enumerate(a__ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward(
torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE : str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 313
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
a__ : Any = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
a__ : Any = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : str = collections.OrderedDict()
SCREAMING_SNAKE_CASE : Any = collections.OrderedDict()
SCREAMING_SNAKE_CASE : List[str] = collections.OrderedDict()
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : int = f.readlines()
SCREAMING_SNAKE_CASE : Dict = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(a__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = b
SCREAMING_SNAKE_CASE : List[str] = idx
for wd in b:
SCREAMING_SNAKE_CASE : Optional[Any] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
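# A hedged sketch of the on-disk layout the loader above expects, inferred from
# its parsing logic: each line of vocab.txt holds the comma-separated surface
# forms that share one id, and emoji.json is a plain JSON object. The file
# names and contents below are hypothetical.
def _demo_load_vocab_and_emoji():
    """Illustrative only; not part of the library.

    vocab.txt (one id per line):  こんにちは,今日は
    emoji.json:                   {"emoji": {...}, "emoji_inv": {...}}
    """
    vocab, raw_vocab, ids_to_tokens, emoji = load_vocab_and_emoji('''vocab.txt''' , '''emoji.json''' )
    return vocab, raw_vocab, ids_to_tokens, emoji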
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Dict = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<|endoftext|>" , _lowerCamelCase="<|endoftext|>" , _lowerCamelCase="<|startoftext|>" , _lowerCamelCase="<|endoftext|>" , _lowerCamelCase=False , **_lowerCamelCase , ) ->Any:
super().__init__(
unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , do_clean_text=_lowerCamelCase , **_lowerCamelCase , )
if not os.path.isfile(_lowerCamelCase ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_lowerCamelCase ):
raise ValueError(
F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
SCREAMING_SNAKE_CASE : str = do_clean_text
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = load_vocab_and_emoji(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __lowerCAmelCase ( self ) ->str:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
return self.subword_tokenizer.tokenize(_lowerCamelCase , clean=self.do_clean_text )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
return self.vocab.get(_lowerCamelCase , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
return self.subword_tokenizer.convert_id_to_token(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''.join(_lowerCamelCase ).strip()
return out_string
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[int]:
SCREAMING_SNAKE_CASE : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE : Tuple = input_ids[-self.model_max_length :]
return input_ids
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = 0
if os.path.isdir(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : int = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
SCREAMING_SNAKE_CASE : Tuple = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
SCREAMING_SNAKE_CASE : Optional[Any] = token_index
writer.write(''','''.join(_lowerCamelCase ) + '''\n''' )
index += 1
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _lowerCamelCase )
return vocab_file, emoji_file
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab # same as swe
SCREAMING_SNAKE_CASE : List[Any] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE : Tuple = emoji
SCREAMING_SNAKE_CASE : str = np.max([len(_lowerCamelCase ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
SCREAMING_SNAKE_CASE : str = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
SCREAMING_SNAKE_CASE : Dict = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
SCREAMING_SNAKE_CASE : Tuple = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
SCREAMING_SNAKE_CASE : int = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
SCREAMING_SNAKE_CASE : Tuple = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
SCREAMING_SNAKE_CASE : List[Any] = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
SCREAMING_SNAKE_CASE : int = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
SCREAMING_SNAKE_CASE : List[Any] = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) ->Union[str, Any]:
return len(self.ids_to_tokens )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.content_repattera.sub('''<URL>''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = self.content_repattera.sub('''<EMAIL>''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.content_repattera.sub('''<TEL>''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.content_repattera.sub('''<DATE>''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.content_repattera.sub('''<DATE>''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.content_repattera.sub('''<PRICE>''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE : int = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
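# A quick sketch of what the normalisation above produces, assuming a fully
# constructed SubWordJapaneseTokenizer; the input string is hypothetical.
def _demo_clean_text(tokenizer):
    """Illustrative only; not part of the library."""
    cleaned = tokenizer.clean_text('''mail: foo@example.com / see https://example.com''' )
    # -> "mail: <EMAIL> / see <URL>"
    return cleaned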
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=False ) ->Optional[int]:
SCREAMING_SNAKE_CASE : str = text.replace(''' ''' , '''<SP>''' )
SCREAMING_SNAKE_CASE : Optional[int] = text.replace(''' ''' , '''<SP>''' )
SCREAMING_SNAKE_CASE : Optional[int] = text.replace('''\r\n''' , '''<BR>''' )
SCREAMING_SNAKE_CASE : Optional[int] = text.replace('''\n''' , '''<BR>''' )
SCREAMING_SNAKE_CASE : int = text.replace('''\r''' , '''<BR>''' )
SCREAMING_SNAKE_CASE : Optional[Any] = text.replace('''\t''' , '''<TAB>''' )
SCREAMING_SNAKE_CASE : Tuple = text.replace('''—''' , '''ー''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE : str = text.replace(_lowerCamelCase , _lowerCamelCase )
if clean:
SCREAMING_SNAKE_CASE : List[Any] = self.clean_text(_lowerCamelCase )
def check_simbol(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Any = x.encode()
if len(_lowerCamelCase ) == 1 and len(_lowerCamelCase ) == 2:
SCREAMING_SNAKE_CASE : Union[str, Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2a1 and c <= 0xc_2bf)
or (c >= 0xc_780 and c <= 0xc_783)
or (c >= 0xc_ab9 and c <= 0xc_bbf)
or (c >= 0xc_c80 and c <= 0xc_da2)
):
return True
return False
def checkuae(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = x.encode()
if len(_lowerCamelCase ) == 1 and len(_lowerCamelCase ) == 3:
SCREAMING_SNAKE_CASE : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28_080 and c <= 0xe2b_07f:
return True
return False
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : List[str] = []
while pos < len(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = min(len(_lowerCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
SCREAMING_SNAKE_CASE : Optional[int] = [] # (token_id, token, pos)
for e in range(_lowerCamelCase , _lowerCamelCase , -1 ):
SCREAMING_SNAKE_CASE : Optional[Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_lowerCamelCase ) > 2:
SCREAMING_SNAKE_CASE : Any = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_lowerCamelCase ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[0] )[0]
result.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = e
else:
SCREAMING_SNAKE_CASE : List[str] = pos + 1
SCREAMING_SNAKE_CASE : Dict = text[pos:end]
if check_simbol(_lowerCamelCase ):
result.append('''<KIGOU>''' )
elif checkuae(_lowerCamelCase ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
SCREAMING_SNAKE_CASE : Optional[Any] = end
return result
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase="\n" ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Dict = []
for index in _lowerCamelCase : # restores the missing loop over the input ids; `index` is used below
SCREAMING_SNAKE_CASE : Tuple = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_lowerCamelCase ) > 0:
words.append(bytearray(_lowerCamelCase ).decode('''utf-8''' , errors='''replace''' ) )
SCREAMING_SNAKE_CASE : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_lowerCamelCase )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
words.append(bytearray(_lowerCamelCase ).decode('''utf-8''' , errors='''replace''' ) )
SCREAMING_SNAKE_CASE : List[str] = ''''''.join(_lowerCamelCase )
return text
| 313
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda p : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
import numpy as np
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
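# Note: neither function above carries a doctest, so `doctest.testmod()` has
# nothing to verify here. Approximate sanity values, rounded (`sigmoid` is the
# name the second function calls; the GELU approximation is x * sigmoid(1.702 * x)):
#
#     sigmoid(np.array([-1.0, 0.0, 1.0]))   # ~[0.269, 0.500, 0.731]
#     gelu(np.array([-1.0, 0.0, 1.0]))      # ~[-0.154, 0.000, 0.846]  (assumed name)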
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_( a__ ):
"""simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE : str = ord(a__ )
if not _is_chinese_char(a__ ):
return 0
return 1
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ )
if chinese_word:
word_set.add(a__ )
SCREAMING_SNAKE_CASE : str = list(a__ )
return word_list
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE : Tuple = bert_tokens
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ )
while start < end:
SCREAMING_SNAKE_CASE : Dict = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ )
for i in range(a__ , 1 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j]
SCREAMING_SNAKE_CASE : List[str] = start + i
SCREAMING_SNAKE_CASE : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
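# A self-contained sketch of the greedy longest-match step implemented above,
# under assumed names; it drops the `is_chinese` guard of the original for
# brevity and assumes `words` is non-empty.
def _mark_subwords_sketch(tokens, words):
    out, start = list(tokens), 0
    max_len = max(len(w) for w in words)
    while start < len(out):
        for i in range(min(len(out) - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]  # mark continuation characters
                start += i
                break
        else:
            start += 1  # no whole word starts here; move on
    return out
# _mark_subwords_sketch(["中", "国", "人"], {"中国"}) -> ["中", "##国", "人"]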
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
        # We only save positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(a__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
                # save the Chinese token's position
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
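# Output format note (assumed from the upstream whole-word-masking script):
# each entry of `ref_ids` lists, for one input line, the positions of single
# Chinese characters whose BERT token starts with "##". For the tokenized line
# ["[CLS]", "中", "##国", "人", "[SEP]"] the per-line ref ids are [2], which is
# the format typically consumed by `DataCollatorForWholeWordMask`.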
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
    help='''file to process, in the same format as the LM training data''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
a__ : int = parser.parse_args()
main(args)
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , a__ ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = load_tool('''text-to-speech''' )
self.tool.setup()
def __lowerCAmelCase ( self ) ->Any:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = self.tool('''hey''' )
SCREAMING_SNAKE_CASE : List[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def __lowerCAmelCase ( self ) ->Optional[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.tool('''hey''' )
SCREAMING_SNAKE_CASE : str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Tuple = '''1'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
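# Minimal usage sketch; `ffmpeg_read` is the assumed upstream name of the
# function above (obfuscated here) and the file name is a placeholder:
#
#     with open("sample.wav", "rb") as f:
#         audio = ffmpeg_read(f.read(), 16_000)  # mono float32 PCM at 16 kHz
#     print(audio.shape, audio.dtype)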
def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Dict = '''1'''
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = '''alsa'''
SCREAMING_SNAKE_CASE : Any = '''default'''
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation'''
SCREAMING_SNAKE_CASE : Optional[int] = ''':0'''
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = '''dshow'''
SCREAMING_SNAKE_CASE : Any = '''default'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Any = np.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
            # We are running behind real time; skip this chunk
continue
yield item
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = b''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
SCREAMING_SNAKE_CASE : List[str] = False
yield item
SCREAMING_SNAKE_CASE : Dict = stride_left
SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
yield item
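# Worked example of the overlap arithmetic above (function name assumed from
# the upstream audio utilities): with chunk_len=10 and stride=(2, 2) the hop
# between chunks is chunk_len - stride_left - stride_right = 6 bytes, so
#
#     data = bytes(range(22))
#     [c["raw"] for c in chunk_bytes_iter(iter([data]), 10, (2, 2))]
#     # -> [data[0:10], data[6:16], data[12:22], data[18:22]]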
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : str = 2**24  # 16 MB read buffer
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
def UpperCAmelCase_( a__ = 100 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not numbers:
return 0
if not isinstance(a__ , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    # start the running max/min products and the answer at the first element
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = numbers[0], numbers[0], numbers[0]  # max_till_now, min_till_now, max_prod
for i in range(1 , len(a__ ) ):
# update the maximum and minimum subarray products
SCREAMING_SNAKE_CASE : int = numbers[i]
if number < 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = min_till_now, max_till_now
SCREAMING_SNAKE_CASE : str = max(a__ , max_till_now * number )
SCREAMING_SNAKE_CASE : Any = min(a__ , min_till_now * number )
# update the maximum product found till now
SCREAMING_SNAKE_CASE : Any = max(a__ , a__ )
return max_prod
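# Classic max-product-subarray cases the routine above handles (checked by
# hand against its update rules): [2, 3, -2, 4] -> 6 (subarray [2, 3]),
# [-2, 0, -1] -> 0, and [-4, -3, -2] -> 12 (the two leading negatives).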
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr'
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : str = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : int = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[str] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : str = num_feature_levels
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points
SCREAMING_SNAKE_CASE : Any = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : int = class_cost
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def __lowerCAmelCase ( self ) ->int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) ->int:
return self.d_model
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
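# Minimal usage sketch, assuming the upstream class name DeformableDetrConfig:
#
#     from transformers import DeformableDetrConfig
#     config = DeformableDetrConfig(num_queries=300, two_stage=False)
#     assert config.to_dict()["model_type"] == "deformable_detr"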
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = input_paths_and_base_extractors[compression_format]
if input_path is None:
SCREAMING_SNAKE_CASE : Dict = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(a__ )
assert base_extractor.is_extractable(a__ )
SCREAMING_SNAKE_CASE : Tuple = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(a__ , a__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
SCREAMING_SNAKE_CASE : Union[str, Any] = file_path.read_text(encoding='''utf-8''' )
else:
SCREAMING_SNAKE_CASE : List[str] = output_path.read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : List[str] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
SCREAMING_SNAKE_CASE : Tuple = input_paths[compression_format]
if input_path is None:
SCREAMING_SNAKE_CASE : str = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(a__ )
SCREAMING_SNAKE_CASE : Dict = Extractor.infer_extractor_format(a__ )
assert extractor_format is not None
SCREAMING_SNAKE_CASE : Dict = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(a__ , a__ , a__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
SCREAMING_SNAKE_CASE : Union[str, Any] = file_path.read_text(encoding='''utf-8''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = output_path.read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : List[str] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
import tarfile
SCREAMING_SNAKE_CASE : List[Any] = tmp_path / '''data_dot_dot'''
directory.mkdir()
SCREAMING_SNAKE_CASE : int = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(a__ , '''w''' ) as f:
f.add(a__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def UpperCAmelCase_( a__ ):
"""simple docstring"""
import tarfile
SCREAMING_SNAKE_CASE : List[str] = tmp_path / '''data_sym_link'''
directory.mkdir()
SCREAMING_SNAKE_CASE : int = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=a__ )
with tarfile.TarFile(a__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
SCREAMING_SNAKE_CASE : Any = insecure_tar_files[insecure_tar_file]
SCREAMING_SNAKE_CASE : Dict = tmp_path / '''extracted'''
TarExtractor.extract(a__ , a__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
SCREAMING_SNAKE_CASE : Union[str, Any] = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(a__ )
assert zipfile.is_zipfile(str(a__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(a__ ) # but we're right
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = 10
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCamelCase )
return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = True , ) ->Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(_lowerCamelCase , _lowerCamelCase , self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = controlnet(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_lowerCamelCase , _lowerCamelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_lowerCamelCase , is_main_process=_lowerCamelCase , save_function=_lowerCamelCase , safe_serialization=_lowerCamelCase , variant=_lowerCamelCase , )
idx += 1
SCREAMING_SNAKE_CASE : Optional[int] = model_path_to_save + F"""_{idx}"""
@classmethod
def __lowerCAmelCase ( cls , _lowerCamelCase , **_lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = ControlNetModel.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
controlnets.append(_lowerCamelCase )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(_lowerCamelCase )} controlnets loaded from {pretrained_model_path}.""" )
if len(_lowerCamelCase ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(_lowerCamelCase )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(_lowerCamelCase )
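# Usage sketch; the class above is `MultiControlNetModel` upstream (name
# assumed) and the checkpoint paths below are placeholders:
#
#     from diffusers import ControlNetModel
#     nets = [ControlNetModel.from_pretrained(p) for p in ("ctrl_a", "ctrl_b")]
#     multi = MultiControlNetModel(nets)  # forward takes per-net images and per-net scales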
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
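# Worked example of the global attention padding above: padding an input of
# length 3 to length 5 with padding_side == "right" turns a
# global_attention_mask of [0, 1, 0] into [0, 1, 0, -1, -1], where -1 marks
# padding, 0 local attention and 1 global attention.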
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a__ : Tuple = logging.getLogger(__name__)
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) ->Optional[int]:
super().__init__(
_lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , index=_lowerCamelCase , init_retrieval=_lowerCamelCase , )
SCREAMING_SNAKE_CASE : List[str] = None
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
SCREAMING_SNAKE_CASE : Optional[int] = self._infer_socket_ifname()
# avoid clash with the NCCL port
SCREAMING_SNAKE_CASE : Tuple = str(distributed_port + 1 )
SCREAMING_SNAKE_CASE : str = dist.new_group(ranks=_lowerCamelCase , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __lowerCAmelCase ( self ) ->List[Any]:
return dist.get_rank(group=self.process_group ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=torch.floataa ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = torch.empty(_lowerCamelCase , dtype=_lowerCamelCase )
dist.scatter(_lowerCamelCase , src=0 , scatter_list=_lowerCamelCase , group=self.process_group )
return target_tensor
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
SCREAMING_SNAKE_CASE : str = next((addr for addr in addrs if addr.startswith('''e''' )) , _lowerCamelCase )
return ifname
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._main_retrieve(_lowerCamelCase , _lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCamelCase )
# distributed training
SCREAMING_SNAKE_CASE : str = dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE : Dict = None
if self._is_main():
SCREAMING_SNAKE_CASE : List[str] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_lowerCamelCase )]
dist.gather(torch.tensor(_lowerCamelCase ) , dst=0 , gather_list=_lowerCamelCase , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE : int = question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(_lowerCamelCase ) == world_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self._main_retrieve(torch.cat(_lowerCamelCase ).numpy() , _lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_lowerCamelCase ), torch.tensor(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self._chunk_tensor(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self._chunk_tensor(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self._scattered(_lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(_lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_lowerCamelCase )
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Any = matrix_length // 2
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : Optional[int] = [
[a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ )
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return len(a__ ), len(matrix[0] )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
print('''\n'''.join(str(a__ ) for line in matrix ) )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ ) == (2, 2):
return default_matrix_multiplication(a__ , a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE : Dict = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : int = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Any = actual_strassen(matrix_addition(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
# construct the new matrix from our 4 quadrants
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(a__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
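# Why seven recursive calls: Strassen trades the naive eight sub-multiplications
# for seven, giving the recurrence T(n) = 7*T(n/2) + O(n^2) and hence
# T(n) = O(n**log2(7)) ~ O(n**2.81), versus O(n**3) for the straightforward split.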
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ )[1] != matrix_dimensions(a__ )[0]:
SCREAMING_SNAKE_CASE : Any = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a__ )
SCREAMING_SNAKE_CASE : str = matrix_dimensions(a__ )
SCREAMING_SNAKE_CASE : Tuple = matrix_dimensions(a__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
SCREAMING_SNAKE_CASE : str = max(*a__ , *a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(a__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = matrixa
SCREAMING_SNAKE_CASE : Tuple = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(a__ , a__ )
# Removing the additional zeros
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a__ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets(
a__ , a__ , number=a__ , min_len=1_026 , trim=a__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ )
print('''computing perplexity on objective set''' )
SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item()
print('''perplexity on objective set:''' , a__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ )
# Train secondary learner
SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner(
a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ )
SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ )
SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a__ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
for epoch in range(int(a__ ) ):
for step, example in enumerate(a__ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward(
torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE : str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
        '''--batch_size''' , default=16 , type=a__ , help='''batch size of training data for the language model (gpt2)''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False ) ->Any:
SCREAMING_SNAKE_CASE : str = scheduler
SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE : Union[str, Any] = split_batches
SCREAMING_SNAKE_CASE : List[Any] = step_with_optimizer
SCREAMING_SNAKE_CASE : List[str] = GradientState()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes
for _ in range(_lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self ) ->List[str]:
return self.scheduler.state_dict()
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
self.scheduler.load_state_dict(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
return self.scheduler.get_lr()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase )
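# Hedged usage sketch (added): the wrapper above mirrors
# ``accelerate.scheduler.AcceleratedScheduler`` — it defers scheduler steps
# until the wrapped optimizer has actually stepped, which matters under
# gradient accumulation. ``a_`` is the placeholder name this dump gives the
# class, and the method names below follow the upstream accelerate API (the
# dump renames them to placeholders); a plain PyTorch pair is assumed.
def _accelerated_scheduler_demo():
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    wrapped = a_(scheduler, optimizer)  # keep split_batches/step_with_optimizer defaults
    wrapped.step()  # only steps the inner scheduler once gradients are synced
    return wrapped.get_last_lr()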
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : str = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'nat'
__SCREAMING_SNAKE_CASE : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _lowerCamelCase=4 , _lowerCamelCase=3 , _lowerCamelCase=64 , _lowerCamelCase=[3, 4, 6, 5] , _lowerCamelCase=[2, 4, 8, 16] , _lowerCamelCase=7 , _lowerCamelCase=3.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0 , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ) ->str:
super().__init__(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : Dict = embed_dim
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : Any = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : int = qkv_bias
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
SCREAMING_SNAKE_CASE : List[Any] = layer_scale_init_value
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_lowerCamelCase ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
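# Hedged note (added): with the defaults above (embed_dim=64 and four stages,
# depths=[3, 4, 6, 5]) the derived channel dimension after the last stage is
# 64 * 2 ** (4 - 1) = 512 — exactly what the ``hidden_size`` assignment
# computes so that Nat can plug into VisionEncoderDecoderModel.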
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ )
return k
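# Hedged sketch (added, not in the original script): tracing one raw TF
# checkpoint key through the replacement table above. The sample key is an
# illustrative assumption; ``PATTERNS`` is the table's intended name (the
# obfuscated dump binds it to the placeholder ``a__``).
def _rename_key_demo():
    key = "decoder/layer_0/ffn/dense_1/kernel"
    for pegasus_name, hf_name in PATTERNS:
        key = key.replace(pegasus_name, hf_name)
    return key  # -> "decoder.layers.0.fc2.weight"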
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE : Dict = v.T
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
SCREAMING_SNAKE_CASE : int = task_specific_params
SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a__ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a__ : List[Any] = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
a__ : Dict = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a__ : str = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a__ : Tuple = '''allenai'''
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = dict((re.sub(r'''@@$''' , '''''' , a__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , a__ ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE : str = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
SCREAMING_SNAKE_CASE : Any = d[k] # restore
return da
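# Hedged sketch (added): what the function above does to a fairseq vocabulary
# on a toy example — "word@@" BPE continuations drop their marker, full words
# gain a "</w>" end-of-word suffix, and the special tokens are restored as-is.
def _rewrite_dict_keys_demo():
    import re

    toy = {"the": 0, "ma@@": 1, "<s>": 2}
    out = {(re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v for k, v in toy.items()}
    out["<s>"] = out.pop("<s></w>")  # restore the special token, as above
    return out  # {"the</w>": 0, "ma": 1, "<s>": 2}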
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
assert os.path.exists(a__ )
os.makedirs(a__ , exist_ok=a__ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE : List[str] = basename(a__ )
SCREAMING_SNAKE_CASE : List[Any] = dirname(a__ )
SCREAMING_SNAKE_CASE : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE : str = cls.hub_models()
SCREAMING_SNAKE_CASE : Dict = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE : int = hub_utils.from_pretrained(
a__ , a__ , a__ , archive_map=a__ , **a__ )
SCREAMING_SNAKE_CASE : Optional[int] = vars(chkpt['''args''']['''model'''] )
SCREAMING_SNAKE_CASE : Dict = args['''source_lang''']
SCREAMING_SNAKE_CASE : List[Any] = args['''target_lang''']
SCREAMING_SNAKE_CASE : Optional[int] = dirname(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = basename(a__ )
# dicts
SCREAMING_SNAKE_CASE : Any = os.path.join(a__ , F"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE : Any = os.path.join(a__ , F"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE : Dict = Dictionary.load(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE : Dict = len(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(a__ , '''vocab-src.json''' )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE : List[str] = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE : List[Any] = False
break
SCREAMING_SNAKE_CASE : Optional[int] = Dictionary.load(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(a__ , '''vocab-tgt.json''' )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE : Tuple = os.path.join(a__ , VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE : str = os.path.join(a__ , a__ )
if os.path.exists(a__ ):
break
with open(a__ , encoding='''utf-8''' ) as fin:
SCREAMING_SNAKE_CASE : Union[str, Any] = fin.read()
SCREAMING_SNAKE_CASE : Optional[int] = re.sub(r''' \d+$''' , '''''' , a__ , 0 , re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(a__ , '''w''' , encoding='''utf-8''' ) as fout:
fout.write(a__ )
# model config
SCREAMING_SNAKE_CASE : int = os.path.join(a__ , '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args["tokenizer"]}"""
SCREAMING_SNAKE_CASE : Dict = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE : Tuple = 5
SCREAMING_SNAKE_CASE : int = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE : str = best_score_hparams[model_dir]['''length_penalty''']
else:
SCREAMING_SNAKE_CASE : int = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# tokenizer config
SCREAMING_SNAKE_CASE : Tuple = os.path.join(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1_024,
'''do_lower_case''': do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# model
SCREAMING_SNAKE_CASE : int = chkpt['''models'''][0]
SCREAMING_SNAKE_CASE : str = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE : Any = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE : List[str] = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[int] = FSMTConfig.from_pretrained(a__ )
SCREAMING_SNAKE_CASE : List[Any] = FSMTForConditionalGeneration(a__ )
# check that it loads ok
model_new.load_state_dict(a__ , strict=a__ )
# save
SCREAMING_SNAKE_CASE : List[str] = os.path.join(a__ , a__ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(a__ , a__ )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a__ : str = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
a__ : Dict = TypeVar('''T''')
a__ : Optional[int] = TypeVar('''U''')
class a_ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Dict = key
SCREAMING_SNAKE_CASE : Any = val
SCREAMING_SNAKE_CASE : DoubleLinkedListNode[T, U] | None = None
SCREAMING_SNAKE_CASE : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) ->str:
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class a_ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self ) ->None:
SCREAMING_SNAKE_CASE : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.rear, self.head
def __repr__( self ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = ['''DoubleLinkedList''']
SCREAMING_SNAKE_CASE : Tuple = self.head
while node.next is not None:
rep.append(str(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : str = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE : Union[str, Any] = node
SCREAMING_SNAKE_CASE : Dict = previous
SCREAMING_SNAKE_CASE : List[Any] = node
SCREAMING_SNAKE_CASE : Tuple = self.rear
def __lowerCAmelCase ( self , _lowerCamelCase ) ->DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE : Union[str, Any] = node.next
SCREAMING_SNAKE_CASE : int = node.prev
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
return node
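# Hedged note (added): with the variable names destroyed by this dump, the two
# methods above are hard to read. Against the upstream source, ``add`` splices
# a node in just before the ``rear`` sentinel and ``remove`` unlinks it
# symmetrically:
#     previous = self.rear.prev
#     previous.next = node; node.prev = previous
#     node.next = self.rear; self.rear.prev = node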
class a_ ( Generic[T, U] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : DoubleLinkedList[T, U] = DoubleLinkedList()
SCREAMING_SNAKE_CASE : Union[str, Any] = capacity
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) ->str:
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , _lowerCamelCase ) ->bool:
return key in self.cache
def __lowerCAmelCase ( self , _lowerCamelCase ) ->U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE : DoubleLinkedListNode[T, U] = self.cache[key]
SCREAMING_SNAKE_CASE : Dict = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_lowerCamelCase )
return node.val
self.miss += 1
return None
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE : Any = DoubleLinkedListNode(_lowerCamelCase , _lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE : Any = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE : Any = value
self.list.add(_lowerCamelCase )
@classmethod
def __lowerCAmelCase ( cls , _lowerCamelCase = 128 ) ->Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(_lowerCamelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*_lowerCamelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE : Any = LRUCache(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = func(*_lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , _lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_lowerCamelCase , '''cache_info''' , _lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
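# Hedged usage sketch (added): memoising a one-argument function with the
# class-level decorator above. ``a_`` is the placeholder name this dump gives
# the LRU cache class, and ``decorator`` is the classmethod's upstream name
# (the dump renames every method); the cache is keyed on args[0].
@a_.decorator(100)
def _fib(num):
    if num in (1, 2):
        return 1
    return _fib(num - 1) + _fib(num - 2)
# _fib(20) -> 6765; _fib.cache_info() then reports the shared hits/misses.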
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase_( a__ , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def UpperCAmelCase_( a__ , a__ , a__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Any = ''''''
else:
SCREAMING_SNAKE_CASE : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :]
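# Hedged sketch (added): the slicing above splits timm's fused qkv projection
# (shape [3 * hidden, hidden]) into separate query/key/value weights. A tiny
# self-contained check with hidden_size = 4:
def _qkv_split_demo():
    import torch

    hidden = 4
    in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : 2 * hidden, :]
    v = in_proj_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)
    return q.shape  # torch.Size([4, 4])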
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = val
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ViTMSNConfig()
SCREAMING_SNAKE_CASE : Optional[int] = 1_000
SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) )
SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 384
SCREAMING_SNAKE_CASE : Any = 1_536
SCREAMING_SNAKE_CASE : List[str] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = 1_024
SCREAMING_SNAKE_CASE : Optional[int] = 4_096
SCREAMING_SNAKE_CASE : Tuple = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : List[Any] = 4_096
SCREAMING_SNAKE_CASE : List[Any] = 24
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder''']
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(a__ )
SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , base_model=a__ )
model.load_state_dict(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw )
SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=a__ , image_std=a__ )
SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Tuple = model(**a__ )
SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
SCREAMING_SNAKE_CASE : List[Any] = Vector()
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Optional[int] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowerCamelCase ) , '''(0,0,0,0,0,1)''' )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowerCamelCase ) , 4 )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Dict = Vector([1, 2] )
SCREAMING_SNAKE_CASE : str = Vector([1, 2, 3, 4, 5] )
SCREAMING_SNAKE_CASE : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
SCREAMING_SNAKE_CASE : Tuple = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Dict = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : List[str] = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE : List[Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : int = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
SCREAMING_SNAKE_CASE : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def __lowerCAmelCase ( self ) ->None:
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def __lowerCAmelCase ( self ) ->None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : int = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _lowerCamelCase , _lowerCamelCase ) ) , '''(3,4,7)''' )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Tuple = Vector([1, 0, 0, 0, 0, 0] )
SCREAMING_SNAKE_CASE : str = x.copy()
self.assertEqual(str(_lowerCamelCase ) , str(_lowerCamelCase ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Dict = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_lowerCamelCase ) , '''(0,1,0)''' )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(_lowerCamelCase ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE : Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_lowerCamelCase , _lowerCamelCase ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE : str = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowerCamelCase , _lowerCamelCase ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
SCREAMING_SNAKE_CASE : Optional[Any] = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(_lowerCamelCase ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def __lowerCAmelCase ( self ) ->None:
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
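# Hedged sketch (added): the 3x3 determinant the determinant test above
# expects (-5), recomputed by cofactor expansion along the first row with
# plain Python, so it does not depend on the ``lib`` module.
def _det3_demo():
    m = [[1, 2, 3], [2, 4, 5], [6, 7, 8]]

    def det2(a, b, c, d):  # determinant of [[a, b], [c, d]]
        return a * d - b * c

    return (
        m[0][0] * det2(m[1][1], m[1][2], m[2][1], m[2][2])
        - m[0][1] * det2(m[1][0], m[1][2], m[2][0], m[2][2])
        + m[0][2] * det2(m[1][0], m[1][1], m[2][0], m[2][1])
    )  # -> -5, matching the determinant test above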
if __name__ == "__main__":
unittest.main()
import csv
import tweepy
# Twitter API credentials
a__ : Union[str, Any] = ''''''
a__ : List[str] = ''''''
a__ : Any = ''''''
a__ : List[str] = ''''''
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ )
auth.set_access_token(a__ , a__ )
SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 )
# save most recent tweets
alltweets.extend(a__ )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(a__ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE : Any = api.user_timeline(
screen_name=a__ , count=200 , max_id=a__ )
# save most recent tweets
alltweets.extend(a__ )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1
print(F"""...{len(a__ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(a__ )
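# Hedged sketch (added): the ``max_id`` pagination pattern used above, with the
# Twitter specifics stripped out. ``fetch_page(max_id)`` is an assumed callable
# returning a list of objects with an ``id`` attribute, newest first.
def _paginate_demo(fetch_page):
    collected = fetch_page(None)
    # ask each next page only for ids strictly older than the oldest seen
    while collected and (page := fetch_page(collected[-1].id - 1)):
        collected.extend(page)
    return collected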
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a__ : Tuple = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a__ : str = 10
a__ : Optional[int] = 256
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) < MIN_NUM_TOKENS:
return None
SCREAMING_SNAKE_CASE : Dict = MinHash(num_perm=a__ )
for token in set(a__ ):
min_hash.update(token.encode() )
return min_hash
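# Hedged sketch (added): fingerprinting one token set with the helper's
# ingredients, standalone so it does not rely on the obfuscated module-level
# names (the dump binds NUM_PERM to the placeholder ``a__``).
def _min_hash_demo():
    from datasketch import MinHash

    mh = MinHash(num_perm=256)
    for token in {"def", "add", "a", "b", "return"}:
        mh.update(token.encode())
    return mh.jaccard(mh)  # identical sketches -> 1.0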
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(a__ ) if len(t.strip() ) > 0}
class a_ :
"""simple docstring"""
    def __init__( self , *, _lowerCamelCase = 0.8_5 , ) ->Tuple:
SCREAMING_SNAKE_CASE : str = duplication_jaccard_threshold
SCREAMING_SNAKE_CASE : int = NUM_PERM
SCREAMING_SNAKE_CASE : str = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
SCREAMING_SNAKE_CASE : List[str] = defaultdict(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : Tuple = self._index.query(_lowerCamelCase )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_lowerCamelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[List[Dict]]:
SCREAMING_SNAKE_CASE : Optional[int] = []
for base, duplicates in self._duplicate_clusters.items():
SCREAMING_SNAKE_CASE : str = [base] + list(_lowerCamelCase )
# reformat the cluster to be a list of dict
SCREAMING_SNAKE_CASE : Dict = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(_lowerCamelCase )
return duplicate_clusters
def __lowerCAmelCase ( self , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : Tuple = self.get_duplicate_clusters()
with open(_lowerCamelCase , '''w''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = element
SCREAMING_SNAKE_CASE : str = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(a__ , max_queue_size=10_000 ) , chunksize=100 , ):
if data is not None:
yield data
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DuplicationIndex(duplication_jaccard_threshold=a__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a__ ) ) , max_queue_size=100 ) ):
di.add(a__ , a__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = get_tokens(a__ )
SCREAMING_SNAKE_CASE : Dict = get_tokens(a__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
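# Worked example for the exact Jaccard similarity above: for token sets
# {a, b, c} and {b, c, d} the intersection has 2 elements and the union has 4,
# so the function returns 2 / 4 = 0.5.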
a__ : Optional[Any] = None
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = []
for elementa in cluster:
SCREAMING_SNAKE_CASE : Any = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
SCREAMING_SNAKE_CASE : Dict = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(a__ , a__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
SCREAMING_SNAKE_CASE : Dict = 1
extremes.append(a__ )
return extremes
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
global _shared_dataset
SCREAMING_SNAKE_CASE : Dict = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = partial(_find_cluster_extremes_shared , jaccard_threshold=a__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
a__ , a__ , ) , total=len(a__ ) , ):
extremes_list.append(a__ )
return extremes_list
def UpperCAmelCase_( a__ , a__ = 0.85 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = make_duplicate_clusters(a__ , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : List[Any] = find_extremes(a__ , a__ , a__ )
for extremes in extremes_clusters:
for element in extremes:
SCREAMING_SNAKE_CASE : int = element
SCREAMING_SNAKE_CASE : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
SCREAMING_SNAKE_CASE : List[Any] = dataset.filter(lambda a__ , a__ : idx not in remove_indices , with_indices=a__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
SCREAMING_SNAKE_CASE : Tuple = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = extreme_dict[element['''base_index''']]['''copies''']
print(F"""Original dataset size: {len(a__ )}""" )
print(F"""Number of duplicate clusters: {len(a__ )}""" )
print(F"""Files in duplicate cluster: {len(a__ )}""" )
print(F"""Unique files in duplicate cluster: {len(a__ )}""" )
print(F"""Filtered dataset size: {len(a__ )}""" )
return ds_filter, duplicate_clusters
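# Hedged end-to-end sketch of the entry point defined above (in the original
# CodeParrot source it is named `deduplicate_dataset`; the dataset name below is
# a placeholder). Rows must carry "content", "repo_name" and "path" columns:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("some_org/some_code_dataset", split="train")  # hypothetical
#     ds_dedup, clusters = deduplicate_dataset(ds, 0.85)
#     print(len(ds), "->", len(ds_dedup))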
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
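# Note on the ONNX config above (hedged: class names are obfuscated here, but in
# transformers they correspond to IBertConfig / IBertOnnxConfig): both declared
# inputs mark axis 0 ("batch") and axis 1 ("sequence") as dynamic, so an exported
# graph accepts arbitrary batch sizes and sequence lengths, and a third "choice"
# axis is inserted only for the multiple-choice task.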
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a_ ( a__ ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''num_encoder_blocks''' ) )
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=64 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[2, 2, 2, 2] , _lowerCamelCase=[8, 4, 2, 1] , _lowerCamelCase=[16, 32, 64, 128] , _lowerCamelCase=[1, 4, 8, 16] , _lowerCamelCase=[1, 2, 4, 8] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Tuple = num_encoder_blocks
SCREAMING_SNAKE_CASE : Tuple = sr_ratios
SCREAMING_SNAKE_CASE : Optional[int] = depths
SCREAMING_SNAKE_CASE : List[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : Dict = downsampling_rates
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Optional[int]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = SegformerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = SegformerForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : List[str] = SegformerForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = SegformerModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = SegformerConfigTester(self , config_class=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_lowerCamelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.attentions
SCREAMING_SNAKE_CASE : List[str] = sum(self.model_tester.depths )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE : int = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
SCREAMING_SNAKE_CASE : Union[str, Any] = (self.model_tester.image_size // 32) ** 2
SCREAMING_SNAKE_CASE : List[str] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE : List[str] = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __lowerCAmelCase ( self ) ->Tuple:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.num_encoder_blocks
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ):
continue
SCREAMING_SNAKE_CASE : Tuple = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@slow
def __lowerCAmelCase ( self ) ->List[Any]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = SegformerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ) ->str:
# only resize + normalize
SCREAMING_SNAKE_CASE : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = encoded_inputs.pixel_values.to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ) ->str:
# only resize + normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = encoded_inputs.pixel_values.to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-1 ) )
@slow
def __lowerCAmelCase ( self ) ->str:
# only resize + normalize
SCREAMING_SNAKE_CASE : str = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Any = encoded_inputs.pixel_values.to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
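# Note on the post-processing test above: SegFormer emits logits at 1/4 of the
# input resolution (128x128 for a 512x512 input), so
# `post_process_semantic_segmentation` upsamples each map to the matching entry
# of `target_sizes` when that argument is given, and otherwise returns maps at
# the raw logit resolution.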
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
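# Hedged sketch of producing dummy export inputs with the ONNX config above
# (class names are obfuscated here; in transformers they correspond to
# ImageGPTConfig / ImageGPTOnnxConfig):
#
#     from transformers import ImageGPTConfig, ImageGPTImageProcessor
#
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#     dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2)
#     print({k: v.shape for k, v in dummy.items()})  # one colour-cluster token id per pixel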
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Any = parent
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return {}
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
SCREAMING_SNAKE_CASE : Any = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = MarkupLMFeatureExtractor if is_bsa_available() else None
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = MarkupLMFeatureExtractionTester(self )
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.feature_extract_tester.prepare_feat_extract_dict()
def __lowerCAmelCase ( self ) ->Any:
# Initialize feature_extractor
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class()
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple = get_html_strings()[0]
SCREAMING_SNAKE_CASE : str = feature_extractor(_lowerCamelCase )
# fmt: off
SCREAMING_SNAKE_CASE : int = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
SCREAMING_SNAKE_CASE : Any = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , _lowerCamelCase )
self.assertEqual(encoding.xpaths , _lowerCamelCase )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = get_html_strings()
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(_lowerCamelCase )
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
SCREAMING_SNAKE_CASE : int = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _lowerCamelCase )
self.assertEqual(encoding.xpaths , _lowerCamelCase )
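# Usage sketch grounded in the assertions above: calling the feature extractor on
# one HTML string (or a list of them) returns parallel `nodes` / `xpaths` batches.
#
#     from transformers import MarkupLMFeatureExtractor
#
#     fe = MarkupLMFeatureExtractor()
#     enc = fe("<html><body><h1>My First Heading</h1></body></html>")
#     print(enc.nodes)   # [['My First Heading']]
#     print(enc.xpaths)  # [['/html/body/h1']]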
from maths.prime_check import is_prime
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a__ )
if is_prime(a__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
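# Example behaviour of the twin-prime helper above (its defining name is
# obfuscated): for 5 it returns 7, since 5 and 7 are both prime; for 7 it
# returns -1, since 9 is composite; a non-integer input raises TypeError.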
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a__ : str = logging.get_logger(__name__)
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) ->None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
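# The slow test above exercises the two-stage Kandinsky 2.2 flow: the prior
# pipeline maps the prompt (plus the init image) to CLIP image embeddings, and
# the controlnet img2img decoder then consumes `image_embeds` /
# `negative_image_embeds` together with the depth `hint` tensor, which is scaled
# to [0, 1] and permuted to shape (1, 3, H, W) before being passed in.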
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
a__ : Dict = logging.getLogger(__name__)
a__ : Union[str, Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
a__ : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(a__ )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def __lowerCAmelCase ( self ) ->Any:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=a__ , metadata={'help': 'The input training data file (a text file).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__SCREAMING_SNAKE_CASE : float = field(
default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def __lowerCAmelCase ( self ) ->Dict:
if self.train_file is not None:
SCREAMING_SNAKE_CASE : Any = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Any = [json.loads(a__ ) for line in f.read().splitlines() if (len(a__ ) > 0 and not line.isspace())]
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = {c: dataset[c] for c in dataset.column_names}
SCREAMING_SNAKE_CASE : Tuple = refs
return Dataset.from_dict(a__ )
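# The ref file read above is JSON-lines: line i holds a list of sub-token indices
# for example i marking positions that continue a whole (Chinese) word, e.g.
# [3, 4, 7]. The list is attached as an extra column (named `chinese_ref` in the
# original script) so DataCollatorForWholeWordMask can mask whole words together;
# such files are typically produced by the companion `run_chinese_ref.py` script.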
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , a__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
SCREAMING_SNAKE_CASE : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
SCREAMING_SNAKE_CASE : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE : str = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE : str = data_args.validation_file
SCREAMING_SNAKE_CASE : List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
SCREAMING_SNAKE_CASE : str = '''text'''
SCREAMING_SNAKE_CASE : Dict = load_dataset(a__ , data_files=a__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(model_args.config_name , **a__ )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **a__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
SCREAMING_SNAKE_CASE : str = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **a__ )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **a__ )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : str = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
SCREAMING_SNAKE_CASE : Dict = AutoModelForMaskedLM.from_config(a__ )
model.resize_token_embeddings(len(a__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
SCREAMING_SNAKE_CASE : str = datasets['''train'''].column_names
else:
SCREAMING_SNAKE_CASE : int = datasets['''validation'''].column_names
SCREAMING_SNAKE_CASE : List[str] = '''text''' if '''text''' in column_names else column_names[0]
SCREAMING_SNAKE_CASE : int = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(a__ ):
# Remove empty lines
SCREAMING_SNAKE_CASE : Optional[int] = [line for line in examples['''text'''] if len(a__ ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=a__ , truncation=a__ , max_length=data_args.max_seq_length )
SCREAMING_SNAKE_CASE : Any = datasets.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
SCREAMING_SNAKE_CASE : str = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
SCREAMING_SNAKE_CASE : str = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
SCREAMING_SNAKE_CASE : str = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
SCREAMING_SNAKE_CASE : Any = False
# Data collator
# This one will take care of randomly masking the tokens.
SCREAMING_SNAKE_CASE : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=a__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[int] = Trainer(
model=a__ , args=a__ , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE : str = model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(a__ , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
SCREAMING_SNAKE_CASE : Any = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Dict = trainer.evaluate()
SCREAMING_SNAKE_CASE : Any = math.exp(eval_output['''eval_loss'''] )
SCREAMING_SNAKE_CASE : Optional[int] = perplexity
SCREAMING_SNAKE_CASE : Tuple = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(a__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def UpperCAmelCase_( a__ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
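# Hedged invocation sketch (paths are placeholders; the flags mirror the
# dataclass fields parsed above through HfArgumentParser):
#
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --train_file train.txt \
#         --train_ref_file train_ref.txt \
#         --do_train \
#         --output_dir ./out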
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
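# A minimal usage sketch for the comparison pipeline above (an assumption, not part of
# this file: the obfuscated identifiers correspond to pipe1..pipe4/res1..res4 of a
# community "checkpoint comparison" pipeline, and the custom_pipeline name is hypothetical):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   img_v1_1, img_v1_2, img_v1_3, img_v1_4 = output.images  # one image per checkpoint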
| 313
| 1
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : str = logging.get_logger(__name__)
a__ : str = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'efficientformer'
def __init__( self , _lowerCamelCase = [3, 2, 6, 4] , _lowerCamelCase = [48, 96, 224, 448] , _lowerCamelCase = [True, True, True, True] , _lowerCamelCase = 448 , _lowerCamelCase = 32 , _lowerCamelCase = 4 , _lowerCamelCase = 7 , _lowerCamelCase = 5 , _lowerCamelCase = 8 , _lowerCamelCase = 4 , _lowerCamelCase = 0.0 , _lowerCamelCase = 16 , _lowerCamelCase = 3 , _lowerCamelCase = 3 , _lowerCamelCase = 3 , _lowerCamelCase = 2 , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = 1 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = 1e-5 , _lowerCamelCase = "gelu" , _lowerCamelCase = 0.0_2 , _lowerCamelCase = 1e-12 , _lowerCamelCase = 224 , _lowerCamelCase = 1e-05 , **_lowerCamelCase , ) ->None:
super().__init__(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = hidden_sizes
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : str = depths
SCREAMING_SNAKE_CASE : List[Any] = mlp_expansion_ratio
SCREAMING_SNAKE_CASE : Any = downsamples
SCREAMING_SNAKE_CASE : Optional[int] = dim
SCREAMING_SNAKE_CASE : Tuple = key_dim
SCREAMING_SNAKE_CASE : Optional[int] = attention_ratio
SCREAMING_SNAKE_CASE : Optional[Any] = resolution
SCREAMING_SNAKE_CASE : int = pool_size
SCREAMING_SNAKE_CASE : int = downsample_patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = downsample_stride
SCREAMING_SNAKE_CASE : str = downsample_pad
SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE : Any = num_metaad_blocks
SCREAMING_SNAKE_CASE : Any = distillation
SCREAMING_SNAKE_CASE : Optional[Any] = use_layer_scale
SCREAMING_SNAKE_CASE : int = layer_scale_init_value
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : Dict = batch_norm_eps
| 313
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
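# Self-contained sketch of the skip-connection bookkeeping in __call__ above: each up
# block consumes the last (layers_per_block + 1) residuals stored on the way down,
# peeled off the end of the running tuple so the deepest features are used first.
layers_per_block = 2
num_up_blocks = 4
down_block_res_samples = tuple(range(num_up_blocks * (layers_per_block + 1)))  # toy stand-ins
for _ in range(num_up_blocks):
    res_samples = down_block_res_samples[-(layers_per_block + 1) :]  # fed to the up block
    down_block_res_samples = down_block_res_samples[: -(layers_per_block + 1)]
assert down_block_res_samples == ()  # every stored residual is consumed exactly once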
| 313
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : List[Any] = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'funnel'
__SCREAMING_SNAKE_CASE : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=[4, 4, 4] , _lowerCamelCase=None , _lowerCamelCase=2 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=64 , _lowerCamelCase=3072 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=None , _lowerCamelCase=1e-9 , _lowerCamelCase="mean" , _lowerCamelCase="relative_shift" , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , **_lowerCamelCase , ) ->Tuple:
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Any = block_sizes
SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * len(_lowerCamelCase ) if block_repeats is None else block_repeats
assert len(_lowerCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE : Tuple = num_decoder_layers
SCREAMING_SNAKE_CASE : Dict = d_model
SCREAMING_SNAKE_CASE : int = n_head
SCREAMING_SNAKE_CASE : Optional[int] = d_head
SCREAMING_SNAKE_CASE : List[str] = d_inner
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout
SCREAMING_SNAKE_CASE : Dict = attention_dropout
SCREAMING_SNAKE_CASE : int = activation_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = initializer_std
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
SCREAMING_SNAKE_CASE : List[Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_type
SCREAMING_SNAKE_CASE : int = separate_cls
SCREAMING_SNAKE_CASE : List[Any] = truncate_seq
SCREAMING_SNAKE_CASE : int = pool_q_only
super().__init__(**_lowerCamelCase )
@property
def num_hidden_layers( self ) ->int:
    return sum(self.block_sizes )
@num_hidden_layers.setter
def num_hidden_layers( self , _lowerCamelCase ) ->None:
    raise NotImplementedError(
        '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def num_blocks( self ) ->int:
    return len(self.block_sizes )
@num_blocks.setter
def num_blocks( self , _lowerCamelCase ) ->None:
    raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
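# Self-contained sketch of the derived sizes exposed by the properties above: with the
# default block_sizes=[4, 4, 4], the config reports 12 hidden layers across 3 blocks.
block_sizes = [4, 4, 4]
assert sum(block_sizes) == 12  # what the num_hidden_layers property returns
assert len(block_sizes) == 3   # what the num_blocks property returns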
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Tuple = frozenset([] )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL()
SCREAMING_SNAKE_CASE : Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if pil_image:
SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase )
inputs.update({'''image_embeds''': None} )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Dict = pipe(
_lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 313
| 1
|
from __future__ import annotations
def UpperCAmelCase_( a__ ):
    """Return the prime factors of a positive integer, smallest first.

    >>> UpperCAmelCase_(360)
    [2, 2, 2, 3, 3, 5]
    """
    n = a__
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
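# Worked example (a quick self-check of the trial division above): 360 = 2*2*2*3*3*5.
assert UpperCAmelCase_(360) == [2, 2, 2, 3, 3, 5]
assert UpperCAmelCase_(97) == [97]  # a prime survives the loop and is appended at the end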
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
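# Self-contained sketch of the nested-dict trie built above: next_tokens walks the dict
# one level per generated token, and an empty dict marks the leaf of an alternative.
trie = {}
for seq in ([1, 2, 3], [1, 2, 4], [1, 5]):
    level = trie
    for tok in seq:
        level = level.setdefault(tok, {})
assert sorted(trie[1]) == [2, 5]     # tokens that may follow [1]
assert sorted(trie[1][2]) == [3, 4]  # tokens that may follow [1, 2]
assert trie[1][5] == {}              # reached_leaf: no continuations left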
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress
# toward the current job, simply update the state.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this
# particular constraint, not for the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward
# any constraint in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints )  # we actually never touch the self.constraints
# objects throughout this process, so they remain in their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
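# A self-contained sketch of the PhrasalConstraint bookkeeping above (assumption: the
# constraint tracks an index into a fixed token sequence, "steps" on each match, and
# resets to the start whenever a generated token fails to advance it):
def _demo_phrasal_progress(token_ids, generated):
    fulfilled_idx = -1
    for token_id in generated:
        if token_id == token_ids[fulfilled_idx + 1]:
            fulfilled_idx += 1  # stepped
            if fulfilled_idx == len(token_ids) - 1:
                return "completed"
        else:
            fulfilled_idx = -1  # reset, as update() does on a non-advancing token
    return f"in progress: {fulfilled_idx + 1}/{len(token_ids)} tokens matched"

assert _demo_phrasal_progress([5, 6, 7], [1, 5, 6, 7]) == "completed"
assert _demo_phrasal_progress([5, 6, 7], [5, 6, 9]) == "in progress: 0/3 tokens matched"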
| 313
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __lowerCAmelCase ( self ) ->Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[str] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler()
SCREAMING_SNAKE_CASE : str = self.dummy_vq_model
SCREAMING_SNAKE_CASE : Optional[int] = LDMPipeline(unet=_lowerCamelCase , vqvae=_lowerCamelCase , scheduler=_lowerCamelCase )
ldm.to(_lowerCamelCase )
ldm.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = ldm(generator=_lowerCamelCase , num_inference_steps=2 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = ldm(generator=_lowerCamelCase , num_inference_steps=2 , output_type='''numpy''' , return_dict=_lowerCamelCase )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
SCREAMING_SNAKE_CASE : Optional[int] = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(_lowerCamelCase )
ldm.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = ldm(generator=_lowerCamelCase , num_inference_steps=5 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : List[str] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
SCREAMING_SNAKE_CASE : Optional[int] = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
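# Self-contained sketch of the slice-comparison pattern used in the tests above: a 3x3
# corner of the last channel is checked against hard-coded references (the zeros below
# are stand-ins, not real pipeline outputs or reference values).
import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for ldm(...).images
image_slice = image[0, -3:, -3:, -1]                # bottom-right 3x3 of the last channel
expected_slice = np.zeros(9, dtype=np.float32)      # hypothetical reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2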
| 313
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets(
a__ , a__ , number=a__ , min_len=1_026 , trim=a__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ )
print('''computing perplexity on objective set''' )
SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item()
print('''perplexity on objective set:''' , a__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ )
# Train secondary learner
SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner(
a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ )
SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ )
SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a__ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
for epoch in range(int(a__ ) ):
for step, example in enumerate(a__ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward(
torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE : str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
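# Toy sketch of the batch-filling pattern above: contexts that pass the secondary
# learner's filter accumulate, and an optimizer step fires once batch_size of them
# have been collected (threshold and q-values below are made up for illustration).
batch_size, examples, steps = 4, 0, 0
for predicted_q in [0.9, 0.1, 0.8, 0.7, 0.95, 0.2, 0.85, 0.6, 0.9, 0.75]:
    if predicted_q >= 0.5:  # stand-in for the `predicted_q < threshold` gating above
        examples += 1
    if examples == batch_size:
        steps += 1          # lm_optimizer.step() / lm_scheduler.step() happen here
        examples = 0
assert steps == 2 and examples == 0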
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''Maximum number of training steps, used to calculate the number of training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
        '''decay the selectivity of our secondary learner filter from '''
        '''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''File name under which the fine-tuned model is saved''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
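# A minimal sketch of the decaying selectivity threshold described in the
# training loop above: start one standard deviation above the mean of the
# observed IG(X) predictions and relax to one standard deviation below it over
# the first 10 batches. The name `decay_threshold` and the sample values are
# illustrative assumptions, not part of the script above (which simply drops
# the threshold to -1 at step 10).
import numpy as np


def decay_threshold(observed_qs, global_step, decay_steps=10):
    """Interpolate linearly from mean + std down to mean - std."""
    mean, std = float(np.mean(observed_qs)), float(np.std(observed_qs))
    progress = min(global_step / decay_steps, 1.0)  # 0 -> strict, 1 -> permissive
    return (mean + std) - 2.0 * std * progress


print(decay_threshold([0.4, 0.5, 0.6], global_step=0))   # mean + std
print(decay_threshold([0.4, 0.5, 0.6], global_step=10))  # mean - std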
import random
from typing import Any
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for _ in range(len(a__ ) ):
        a = random.randint(0 , len(a__ ) - 1 )
        b = random.randint(0 , len(a__ ) - 1 )
        a__[a], a__[b] = a__[b], a__[a]
    return a__
if __name__ == "__main__":
a__ : List[Any] = [0, 1, 2, 3, 4, 5, 6, 7]
a__ : Tuple = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
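# For comparison, a sketch of the textbook Fisher-Yates (Knuth) shuffle. Unlike
# the random-pair swaps above, it walks the list from the back and swaps each
# position with a uniformly chosen earlier one, which makes every permutation
# equally likely. `fisher_yates_shuffle_uniform` is an illustrative name.
import random
from typing import Any


def fisher_yates_shuffle_uniform(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive: 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates_shuffle_uniform([0, 1, 2, 3, 4, 5, 6, 7]))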
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda a__ : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a__ : Union[str, Any] = get_logger(__name__)
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__=0 ):
"""simple docstring"""
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE : Union[str, Any] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE : Dict = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(a__ , a__ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(a__ , a__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
SCREAMING_SNAKE_CASE : Tuple = os.path.join(a__ , a__ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(a__ , a__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE : Any = os.path.join(a__ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(a__ , exist_ok=a__ )
logger.info(F"""Saving model to {ckpt_dir}""" )
SCREAMING_SNAKE_CASE : List[Any] = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=a__ , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(a__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
SCREAMING_SNAKE_CASE : int = os.path.join(a__ , a__ )
logger.info(F"""Loading model from {input_model_file}""" )
SCREAMING_SNAKE_CASE : List[str] = torch.load(a__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE : Optional[Any] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
SCREAMING_SNAKE_CASE : List[str] = os.path.join(a__ , a__ )
logger.info(F"""Loading model from {input_model_file}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(a__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
os.path.join(a__ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
SCREAMING_SNAKE_CASE : str = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=a__ , storage_reader=dist_cp.FileSystemReader(a__ ) , planner=DefaultLoadPlanner() , )
SCREAMING_SNAKE_CASE : Tuple = state_dict['''model''']
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(a__ )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__=0 ):
"""simple docstring"""
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE : Union[str, Any] = FSDP.optim_state_dict(a__ , a__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(a__ , a__ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(a__ , a__ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(a__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(a__ , exist_ok=a__ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
            # The check below should work, but it currently does not (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE : List[Any] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(a__ , a__ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
SCREAMING_SNAKE_CASE : Tuple = torch.load(a__ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
SCREAMING_SNAKE_CASE : Optional[int] = (
os.path.join(a__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(a__ ) , )
SCREAMING_SNAKE_CASE : Any = optim_state['''optimizer''']
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
SCREAMING_SNAKE_CASE : List[str] = FSDP.optim_state_dict_to_load(a__ , a__ , a__ )
optimizer.load_state_dict(a__ )
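# A compact sketch of the FULL_STATE_DICT branch handled above, assuming an
# already-wrapped FSDP `model`: the full state dict is gathered to rank 0
# (offloaded to CPU), while LOCAL_STATE_DICT keeps one flat shard per rank and
# SHARDED_STATE_DICT delegates to torch.distributed.checkpoint.
from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig, StateDictType
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP


def full_state_dict_on_rank0(model: FSDP) -> dict:
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        return model.state_dict()  # complete on rank 0, empty on other ranks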
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if (
        (cp >= 0x4_E00 and cp <= 0x9_FFF)  # CJK Unified Ideographs
        or (cp >= 0x3_400 and cp <= 0x4_DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20_000 and cp <= 0x2A_6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A_700 and cp <= 0x2B_73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B_740 and cp <= 0x2B_81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B_820 and cp <= 0x2C_EAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF_900 and cp <= 0xF_AFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F_800 and cp <= 0x2F_A1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE : str = ord(a__ )
if not _is_chinese_char(a__ ):
return 0
return 1
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ )
if chinese_word:
word_set.add(a__ )
SCREAMING_SNAKE_CASE : str = list(a__ )
return word_list
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE : Tuple = bert_tokens
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ )
while start < end:
SCREAMING_SNAKE_CASE : Dict = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ )
for i in range(a__ , 1 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j]
SCREAMING_SNAKE_CASE : List[str] = start + i
SCREAMING_SNAKE_CASE : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
        # We only save the positions of Chinese subwords that start with ##,
        # i.e. subwords that are part of a whole word.
        for i, token in enumerate(a__ ):
            if token[:2] == "##":
                SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
                # save the position of this Chinese token
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
    SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp )  # faster on a GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
a__ : int = parser.parse_args()
main(args)
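# A worked example of the whole-word reference construction above, using a
# simplified self-contained re-implementation (the functions in this file are
# defined under obfuscated names, and this version omits the is_chinese guard).
# Given BERT characters and the LTP word "中国", the second character becomes a
# ## continuation subword so whole-word masking can treat "中国" as one unit.
def add_sub_symbol_demo(bert_tokens, chinese_word_set):
    max_word_len = max(len(w) for w in chinese_word_set)
    start, end = 0, len(bert_tokens)
    while start < end:
        single_word = True
        for i in range(min(end - start, max_word_len), 1, -1):
            if "".join(bert_tokens[start : start + i]) in chinese_word_set:
                for j in range(start + 1, start + i):
                    bert_tokens[j] = "##" + bert_tokens[j]
                start += i
                single_word = False
                break
        if single_word:
            start += 1
    return bert_tokens


print(add_sub_symbol_demo(["中", "国", "人"], {"中国"}))  # ['中', '##国', '人']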
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = abs(a__ )
SCREAMING_SNAKE_CASE : int = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = abs(a__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return sum(int(a__ ) for c in str(abs(a__ ) ) )
def UpperCAmelCase_( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(a__ , a__ ) -> None:
SCREAMING_SNAKE_CASE : Dict = F"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE : str = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
print(F"""{call:56} = {func(a__ )} -- {timing:.4f} seconds""" )
for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(a__ , a__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
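# A quick worked example for the three digit-sum variants benchmarked above:
# for n = 262144 the digit sum is 2 + 6 + 2 + 1 + 4 + 4 = 19, and the sign is
# ignored via abs(). `digit_sum` is a compact stand-in, since the functions in
# this file are defined under obfuscated names.
def digit_sum(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


assert digit_sum(262_144) == 19
assert digit_sum(-262_144) == 19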
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Tuple = '''1'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Dict = '''1'''
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = '''alsa'''
SCREAMING_SNAKE_CASE : Any = '''default'''
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation'''
SCREAMING_SNAKE_CASE : Optional[int] = ''':0'''
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = '''dshow'''
SCREAMING_SNAKE_CASE : Any = '''default'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Any = np.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = b''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
SCREAMING_SNAKE_CASE : List[str] = False
yield item
SCREAMING_SNAKE_CASE : Dict = stride_left
SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
yield item
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : str = 2**24  # 16 MB read buffer
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
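# A toy demonstration of the chunk/stride logic implemented above: each emitted
# chunk of chunk_len bytes overlaps its neighbours by the stride amounts so a
# downstream ASR model can discard the unreliable borders. This is a
# self-contained re-implementation for a single in-memory buffer; the real
# chunker above consumes a byte iterator and also supports streaming output.
def chunk_with_stride(raw_bytes: bytes, chunk_len: int, stride: tuple):
    stride_left, stride_right = stride
    acc, _stride_left, chunks = raw_bytes, 0, []
    while len(acc) >= chunk_len:
        chunks.append({"raw": acc[:chunk_len], "stride": (_stride_left, stride_right)})
        _stride_left = stride_left
        acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:
        chunks.append({"raw": acc, "stride": (_stride_left, 0)})
    return chunks


for item in chunk_with_stride(bytes(range(10)), chunk_len=4, stride=(1, 1)):
    print(item["stride"], list(item["raw"]))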
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a_ :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *_lowerCamelCase , **_lowerCamelCase ) ->str:
pass
@is_pipeline_test
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : Any = image_classifier(_lowerCamelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we are within floating-point error,
        # so the ordering is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(_lowerCamelCase ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE : Optional[int] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
] , )
@require_tf
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : int = image_classifier(_lowerCamelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )},
],
] , )
@slow
@require_torch
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_classifier(_lowerCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE : int = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier(_lowerCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
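# A minimal standalone sketch of the pipeline exercised by the tests above; the
# image path matches the test fixture and is otherwise an illustrative choice.
from PIL import Image
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))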
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : int = data
def __iter__( self ) ->Dict:
for element in self.data:
yield element
def UpperCAmelCase_( a__=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Accelerator(even_batches=a__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
if iterable:
SCREAMING_SNAKE_CASE : int = DummyIterableDataset(torch.as_tensor(range(a__ ) ) )
else:
SCREAMING_SNAKE_CASE : int = TensorDataset(torch.as_tensor(range(a__ ) ) )
SCREAMING_SNAKE_CASE : List[Any] = DataLoader(a__ , batch_size=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(a__ )
return dl
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = create_dataloader(accelerator=a__ , dataset_size=a__ , batch_size=a__ )
SCREAMING_SNAKE_CASE : Optional[int] = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = create_accelerator(even_batches=a__ )
verify_dataloader_batch_sizes(
a__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = create_accelerator(even_batches=a__ )
SCREAMING_SNAKE_CASE : str = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = create_dataloader(a__ , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE : List[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ddp_model(batch[0].float() )
SCREAMING_SNAKE_CASE : Dict = output.sum()
loss.backward()
batch_idxs.append(a__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with warnings.catch_warnings(record=a__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = create_accelerator(even_batches=a__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE : str = accelerator.prepare(a__ )
SCREAMING_SNAKE_CASE : int = create_dataloader(a__ , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE : Dict = create_dataloader(a__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a__ ):
SCREAMING_SNAKE_CASE : Tuple = train_dl.batch_sampler.even_batches
SCREAMING_SNAKE_CASE : List[Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Tuple = create_accelerator(even_batches=a__ )
SCREAMING_SNAKE_CASE : int = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(a__ )
create_dataloader(a__ , dataset_size=3 , batch_size=1 , iterable=a__ )
SCREAMING_SNAKE_CASE : Any = create_dataloader(a__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a__ ):
SCREAMING_SNAKE_CASE : Optional[int] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = create_accelerator()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE : int = accelerator.prepare(a__ )
create_dataloader(a__ , dataset_size=3 , batch_size=1 , iterable=a__ )
with warnings.catch_warnings(record=a__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a__ ):
pass
assert issubclass(w[-1].category , a__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
SCREAMING_SNAKE_CASE : Tuple = accelerator.state.distributed_type
SCREAMING_SNAKE_CASE : Optional[int] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a__ )
SCREAMING_SNAKE_CASE : Tuple = original_state
if __name__ == "__main__":
main()
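# A pure-Python sketch of what `even_batches` does conceptually: with 2
# processes and 3 samples, the sampler wraps around and repeats an early sample
# so both processes see the same number of batches. The index layout below is
# illustrative, not the exact order Accelerate produces.
def shard_indices(num_samples, num_processes, even_batches):
    indices = list(range(num_samples))
    if even_batches and num_samples % num_processes != 0:
        pad = num_processes - num_samples % num_processes
        indices += indices[:pad]  # wrap around to pad the final batch
    return [indices[rank::num_processes] for rank in range(num_processes)]


print(shard_indices(3, 2, even_batches=True))   # [[0, 2], [1, 0]]
print(shard_indices(3, 2, even_batches=False))  # [[0, 2], [1]]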
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr'
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : str = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : int = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[str] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : str = num_feature_levels
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points
SCREAMING_SNAKE_CASE : Any = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : int = class_cost
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def __lowerCAmelCase ( self ) ->int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) ->int:
return self.d_model
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
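# A minimal usage sketch for the configuration above, referring to it by its
# canonical name DeformableDetrConfig (the class in this file is defined under
# an obfuscated alias). Note that two_stage=True requires with_box_refine=True,
# as enforced in __init__, and that the attribute_map exposes d_model and
# encoder_attention_heads under their generic names.
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
print(config.hidden_size, config.num_attention_heads)  # 256 8, via attribute_map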
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaForSequenceClassification.from_pretrained(a__ , config=a__ )
SCREAMING_SNAKE_CASE : int = downstream_dict['''projector.weight''']
SCREAMING_SNAKE_CASE : Any = downstream_dict['''projector.bias''']
SCREAMING_SNAKE_CASE : List[str] = downstream_dict['''model.post_net.linear.weight''']
SCREAMING_SNAKE_CASE : str = downstream_dict['''model.post_net.linear.bias''']
return model
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaForAudioFrameClassification.from_pretrained(a__ , config=a__ )
SCREAMING_SNAKE_CASE : Dict = downstream_dict['''model.linear.weight''']
SCREAMING_SNAKE_CASE : str = downstream_dict['''model.linear.bias''']
return model
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaForXVector.from_pretrained(a__ , config=a__ )
SCREAMING_SNAKE_CASE : int = downstream_dict['''connector.weight''']
SCREAMING_SNAKE_CASE : Tuple = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE : Dict = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
SCREAMING_SNAKE_CASE : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
SCREAMING_SNAKE_CASE : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
SCREAMING_SNAKE_CASE : Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def UpperCAmelCase_( a__ , a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = torch.load(a__ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Tuple = checkpoint['''Downstream''']
SCREAMING_SNAKE_CASE : str = WavaVecaConfig.from_pretrained(a__ )
SCREAMING_SNAKE_CASE : List[str] = WavaVecaFeatureExtractor.from_pretrained(
a__ , return_attention_mask=a__ , do_normalize=a__ )
SCREAMING_SNAKE_CASE : Tuple = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
SCREAMING_SNAKE_CASE : int = convert_classification(a__ , a__ , a__ )
elif arch.endswith('''ForAudioFrameClassification''' ):
SCREAMING_SNAKE_CASE : List[Any] = convert_diarization(a__ , a__ , a__ )
elif arch.endswith('''ForXVector''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = convert_xvector(a__ , a__ , a__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE : Any = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
a__ : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = 10
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCamelCase )
return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
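# --- Hedged illustration (not part of the test above): the scale -> predict -> step
# denoising loop these assertions exercise, shown with a stub noise predictor so it
# runs standalone. The Euler scheduler below is an assumption for illustration only;
# the test itself instantiates its own scheduler class and dummy UNet.
import torch
from diffusers import EulerDiscreteScheduler
demo_scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
demo_scheduler.set_timesteps(10)
demo_generator = torch.manual_seed(0)
demo_sample = torch.ones(1, 3, 8, 8) * demo_scheduler.init_noise_sigma
def demo_model(x, t):  # stand-in for the dummy UNet used in the real test
    return 0.1 * x
for t in demo_scheduler.timesteps:
    model_input = demo_scheduler.scale_model_input(demo_sample, t)
    noise_pred = demo_model(model_input, t)
    demo_sample = demo_scheduler.step(noise_pred, t, demo_sample, generator=demo_generator).prev_sample
print(demo_sample.abs().sum().item(), demo_sample.abs().mean().item())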
| 313
| 1
|
from ...processing_utils import ProcessorMixin
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = 'SpeechT5FeatureExtractor'
__SCREAMING_SNAKE_CASE : int = 'SpeechT5Tokenizer'
def __init__( self , _lowerCamelCase , _lowerCamelCase ) ->Any:
super().__init__(_lowerCamelCase , _lowerCamelCase )
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''audio''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''text''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = kwargs.pop('''text_target''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''audio_target''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''sampling_rate''' , _lowerCamelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE : List[str] = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
elif text is not None:
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_target is not None:
SCREAMING_SNAKE_CASE : Any = self.feature_extractor(audio_target=_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = targets['''input_values''']
elif text_target is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE : int = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE : List[Any] = labels
SCREAMING_SNAKE_CASE : str = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE : Dict = decoder_attention_mask
return inputs
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : str = kwargs.pop('''input_values''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop('''input_ids''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''labels''' , _lowerCamelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
elif input_ids is not None:
SCREAMING_SNAKE_CASE : Any = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : int = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowerCamelCase , _lowerCamelCase ) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE : Dict = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE : Any = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE : Dict = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = feature_size_hack
SCREAMING_SNAKE_CASE : List[str] = targets['''input_values''']
else:
SCREAMING_SNAKE_CASE : List[str] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE : Any = labels
SCREAMING_SNAKE_CASE : Optional[int] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE : List[str] = decoder_attention_mask
return inputs
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->int:
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
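# --- Hedged usage sketch for the routing in __call__ above: `text` feeds the tokenizer,
# while `audio_target` feeds the feature extractor and lands in `labels`. The checkpoint
# name follows the public SpeechT5 TTS release but is illustrative here.
import numpy as np
from transformers import SpeechT5Processor
demo_processor = SpeechT5Processor.from_pretrained('''microsoft/speecht5_tts''')
demo_speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
demo_batch = demo_processor(text='''hello world''', audio_target=demo_speech, sampling_rate=16000, return_tensors='''pt''')
print(sorted(demo_batch.keys()))  # typically input_ids, attention_mask and labels; exact keys may vary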
| 313
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
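# --- Hedged plain-Python illustration of the `global_attention_mask` padding rule in
# `_pad` above: right-side padding extends the mask with -1 (meaning "local attention"),
# mirroring how the sequential inputs are padded. The values below are made up.
demo_encoded = {'''input_ids''': [0, 713, 2], '''global_attention_mask''': [1, 0, 0]}
demo_difference = 6 - len(demo_encoded['''input_ids'''])  # target max_length of 6
demo_encoded['''global_attention_mask'''] = demo_encoded['''global_attention_mask'''] + [-1] * demo_difference
print(demo_encoded['''global_attention_mask'''])  # [1, 0, 0, -1, -1, -1]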
| 313
| 1
|
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Any = matrix_length // 2
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : Optional[int] = [
[a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ )
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return len(a__ ), len(matrix[0] )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
print('''\n'''.join(str(line ) for line in matrix ) )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ ) == (2, 2):
return default_matrix_multiplication(a__ , a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE : Dict = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : int = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Any = actual_strassen(matrix_addition(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
# construct the new matrix from our 4 quadrants
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(a__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ )[1] != matrix_dimensions(a__ )[0]:
SCREAMING_SNAKE_CASE : Any = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a__ )
SCREAMING_SNAKE_CASE : str = matrix_dimensions(a__ )
SCREAMING_SNAKE_CASE : Tuple = matrix_dimensions(a__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
SCREAMING_SNAKE_CASE : str = max(*a__ , *a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(a__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = matrixa
SCREAMING_SNAKE_CASE : Tuple = matrixa
# Adding zeros to the matrices so that the array dimensions match and are a
# power of 2
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(a__ , a__ )
# Removing the additional zeros
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a__ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
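# --- Hedged sanity check (separate from the module above, whose public names are
# obfuscated here): Strassen must agree with a naive triple-loop multiply, which is
# an easy cross-check when modifying the recursion.
def demo_naive_multiply(lhs, rhs):
    return [
        [sum(lhs[i][k] * rhs[k][j] for k in range(len(rhs))) for j in range(len(rhs[0]))]
        for i in range(len(lhs))
    ]
print(demo_naive_multiply([[1, 2], [3, 4]], [[5, 6], [7, 8]]))  # [[19, 22], [43, 50]]
# Strassen on the same 2x2 inputs goes through the default multiplication base case
# and must produce the identical result.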
| 313
|
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Any = matrix_length // 2
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : Optional[int] = [
[a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ )
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return len(a__ ), len(matrix[0] )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
print('''\n'''.join(str(line ) for line in matrix ) )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ ) == (2, 2):
return default_matrix_multiplication(a__ , a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE : Dict = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : int = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Any = actual_strassen(matrix_addition(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
# construct the new matrix from our 4 quadrants
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(a__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ )[1] != matrix_dimensions(a__ )[0]:
SCREAMING_SNAKE_CASE : Any = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a__ )
SCREAMING_SNAKE_CASE : str = matrix_dimensions(a__ )
SCREAMING_SNAKE_CASE : Tuple = matrix_dimensions(a__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
SCREAMING_SNAKE_CASE : str = max(*a__ , *a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(a__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = matrixa
SCREAMING_SNAKE_CASE : Tuple = matrixa
# Adding zeros to the matrices so that the array dimensions match and are a
# power of 2
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(a__ , a__ )
# Removing the additional zeros
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a__ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 313
| 1
|
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(a__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(a__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
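# --- Hedged illustration of the gravity passes above: in each outer pass the largest
# remaining "bead" falls towards its rod, so len(sequence) passes suffice to sort.
demo_rods = [5, 4, 3, 2, 1]
for _ in range(len(demo_rods)):
    for i in range(len(demo_rods) - 1):
        if demo_rods[i] > demo_rods[i + 1]:
            overflow = demo_rods[i] - demo_rods[i + 1]
            demo_rods[i] -= overflow
            demo_rods[i + 1] += overflow
    print(demo_rods)  # last line printed is [1, 2, 3, 4, 5]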
| 313
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False ) ->Any:
SCREAMING_SNAKE_CASE : str = scheduler
SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE : Union[str, Any] = split_batches
SCREAMING_SNAKE_CASE : List[Any] = step_with_optimizer
SCREAMING_SNAKE_CASE : List[str] = GradientState()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes
for _ in range(_lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self ) ->List[str]:
return self.scheduler.state_dict()
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
self.scheduler.load_state_dict(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
return self.scheduler.get_lr()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase )
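# --- Hedged sketch of the behaviour being wrapped above, using a plain PyTorch
# scheduler: the wrapper forwards step() to the underlying scheduler (num_processes
# times when split_batches is False) only once gradients have been synchronized.
import torch
demo_linear = torch.nn.Linear(2, 2)
demo_optimizer = torch.optim.SGD(demo_linear.parameters(), lr=0.1)
demo_scheduler = torch.optim.lr_scheduler.StepLR(demo_optimizer, step_size=1, gamma=0.5)
demo_optimizer.step()   # the optimizer must step first, as the wrapper also enforces
demo_scheduler.step()
print(demo_scheduler.get_last_lr())  # [0.05]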
| 313
| 1
|
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(one ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(nn ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints is fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental
# progress on the current job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this
# particular constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step
# towards any of our list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
# themselves throughout this process, so they stay at their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
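# --- Hedged usage sketch written against the upstream `transformers` names that the
# obfuscated classes above correspond to (PhrasalConstraint); the token ids are made up.
from transformers import PhrasalConstraint
demo_constraint = PhrasalConstraint([5, 9, 3])
print(demo_constraint.advance())   # 5 -> the next token required to make progress
print(demo_constraint.update(5))   # (True, False, False): stepped, not completed, no reset
print(demo_constraint.update(9))   # (True, False, False)
print(demo_constraint.update(3))   # (True, True, False): the phrase is fully matched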
| 313
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ )
return k
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE : Dict = v.T
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
SCREAMING_SNAKE_CASE : int = task_specific_params
SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
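# --- Hedged illustration of the PATTERNS table above: each (old, new) pair is applied
# in order, rewriting a TF variable name into its PyTorch state_dict key. The sample
# key below is made up but follows the Pegasus naming scheme; only a subset of the
# patterns is shown.
demo_key = '''encoder/memory_attention/output_proj/kernel'''
for demo_old, demo_new in [('''memory_attention''', '''encoder_attn'''), ('''/''', '''.'''), ('''output_proj''', '''out_proj'''), ('''kernel''', '''weight''')]:
    demo_key = demo_key.replace(demo_old, demo_new)
print(demo_key)  # encoder.encoder_attn.out_proj.weight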
| 313
| 1
|
import operator as op
a__ : Dict = '''scaler.pt'''
a__ : Dict = '''pytorch_model'''
a__ : Any = '''random_states'''
a__ : Optional[int] = '''optimizer'''
a__ : Tuple = '''scheduler'''
a__ : int = '''pytorch_model.bin'''
a__ : str = '''pytorch_model.bin.index.json'''
a__ : Optional[Any] = '''model.safetensors'''
a__ : str = '''model.safetensors.index.json'''
a__ : Optional[int] = '''1.10.2'''
a__ : int = '''py38'''
a__ : str = '''4.17.0'''
a__ : Optional[int] = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
a__ : List[Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
a__ : List[str] = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
a__ : int = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
a__ : List[str] = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
a__ : int = '''2.0.1'''
a__ : Union[str, Any] = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
a__ : Optional[Any] = ['''default''', '''reduce-overhead''', '''max-autotune''']
a__ : Union[str, Any] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__ : List[Any] = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
a__ : Optional[int] = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
a__ : Any = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
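# --- Hedged sketch of how the operator table above is typically used: map a comparison
# string onto its function and compare parsed versions. `packaging` is an assumed
# dependency here, as in accelerate's own version checks.
from packaging import version
demo_ops = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
def demo_compare_versions(current, operation, target):
    return demo_ops[operation](version.parse(current), version.parse(target))
print(demo_compare_versions('''2.0.1''', '''>=''', '''1.10.2'''))  # True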
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 313
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a__ : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
a__ : List[Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
a__ : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(a__ , '''rb''' ) as f:
SCREAMING_SNAKE_CASE : Dict = Image.open(a__ )
return im.convert('''RGB''' )
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=a__ , metadata={'help': 'A folder containing the training data.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=a__ , metadata={'help': 'A folder containing the validation data.'} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __lowerCAmelCase ( self ) ->Tuple:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(a__ )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : str = field(default=a__ , metadata={'help': 'Name or path of preprocessor config.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.stack([example['''pixel_values'''] for example in examples] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , a__ , a__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Any = training_args.get_process_log_level()
logger.setLevel(a__ )
transformers.utils.logging.set_verbosity(a__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE : Any = {}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE : int = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE : List[str] = os.path.join(data_args.validation_dir , '''**''' )
SCREAMING_SNAKE_CASE : Tuple = load_dataset(
'''imagefolder''' , data_files=a__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE : Dict = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , a__ ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE : Any = dataset['''train'''].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE : Any = split['''train''']
SCREAMING_SNAKE_CASE : Union[str, Any] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE : Dict = dataset['''train'''].features['''labels'''].names
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = {}, {}
for i, label in enumerate(a__ ):
SCREAMING_SNAKE_CASE : List[Any] = str(a__ )
SCREAMING_SNAKE_CASE : Tuple = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a__ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
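# p.predictions holds the raw logits with shape (num_examples, num_labels); taking the
# argmax over the label axis converts them to predicted class ids before the accuracy
# metric compares them against p.label_ids.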
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(a__ ) , labelaid=a__ , idalabel=a__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : Any = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : List[str] = image_processor.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
SCREAMING_SNAKE_CASE : Optional[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
SCREAMING_SNAKE_CASE : Tuple = Compose(
[
RandomResizedCrop(a__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE : int = Compose(
[
Resize(a__ ),
CenterCrop(a__ ),
ToTensor(),
normalize,
] )
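# Training uses stochastic augmentation (random resized crop + horizontal flip) while
# evaluation uses a deterministic resize + center crop, so eval metrics are reproducible;
# both pipelines normalize with the image processor's mean/std to match pretraining.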
def train_transforms(a__ ):
SCREAMING_SNAKE_CASE : Dict = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(a__ ):
SCREAMING_SNAKE_CASE : List[str] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(a__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : List[str] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(a__ )
# Initialize our trainer
SCREAMING_SNAKE_CASE : Dict = Trainer(
model=a__ , args=a__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=a__ , tokenizer=a__ , data_collator=a__ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE : Any = trainer.evaluate()
trainer.log_metrics('''eval''' , a__ )
trainer.save_metrics('''eval''' , a__ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE : Optional[int] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__ )
else:
trainer.create_model_card(**a__ )
if __name__ == "__main__":
main()
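# Illustrative launch (hypothetical paths; any dataset exposing an image-classification
# task template should work the same way):
#   python run_image_classification.py --dataset_name beans \
#       --output_dir ./vit-beans --do_train --do_eval --remove_unused_columns False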
| 313
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase_( a__ , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def UpperCAmelCase_( a__ , a__ , a__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Any = ''''''
else:
SCREAMING_SNAKE_CASE : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :]
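# The original checkpoint fuses query/key/value into one projection of shape
# (3 * hidden_size, hidden_size); the three row slices above recover them in
# q, k, v order, which is the layout the HF attention modules expect.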
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
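# The fc/bn stack removed here is MSN's projection head, which is only used during
# self-supervised pre-training; only the target-encoder backbone weights are
# carried over into the converted ViT model.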
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = val
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ViTMSNConfig()
SCREAMING_SNAKE_CASE : Optional[int] = 1_000
SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) )
SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 384
SCREAMING_SNAKE_CASE : Any = 1_536
SCREAMING_SNAKE_CASE : List[str] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = 1_024
SCREAMING_SNAKE_CASE : Optional[int] = 4_096
SCREAMING_SNAKE_CASE : Tuple = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : List[Any] = 4_096
SCREAMING_SNAKE_CASE : List[Any] = 24
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder''']
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(a__ )
SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , base_model=a__ )
model.load_state_dict(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw )
SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=a__ , image_std=a__ )
SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Tuple = model(**a__ )
SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 )
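# Sanity check: the first three dimensions of the first-token (CLS) embedding must
# match values recorded from the original implementation, guarding against
# conversion mistakes before the model is saved.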
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = IFInpaintingSuperResolutionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__SCREAMING_SNAKE_CASE : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__SCREAMING_SNAKE_CASE : Any = PipelineTesterMixin.required_optional_params - {'latents'}
def __lowerCAmelCase ( self ) ->Optional[Any]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->List[Any]:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __lowerCAmelCase ( self ) ->str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __lowerCAmelCase ( self ) ->Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __lowerCAmelCase ( self ) ->Dict:
self._test_save_load_local()
def __lowerCAmelCase ( self ) ->Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 313
|
import csv
import tweepy
# Twitter API credentials
a__ : Union[str, Any] = ''''''
a__ : List[str] = ''''''
a__ : Any = ''''''
a__ : List[str] = ''''''
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ )
auth.set_access_token(a__ , a__ )
SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 )
# save most recent tweets
alltweets.extend(a__ )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(a__ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE : Any = api.user_timeline(
screen_name=a__ , count=200 , max_id=a__ )
# save most recent tweets
alltweets.extend(a__ )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1
print(F"""...{len(a__ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(a__ )
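# Note: the v1.1 user_timeline endpoint returns at most 200 tweets per call and
# roughly the most recent ~3200 tweets overall, so very old tweets may remain
# unreachable regardless of the max_id pagination above.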
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 313
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a__ : Optional[int] = logging.get_logger(__name__)
# General docstring
a__ : str = '''MobileNetV1Config'''
# Base docstring
a__ : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
a__ : Tuple = [1, 1_024, 7, 7]
# Image classification docstring
a__ : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
a__ : Any = '''tabby, tabby cat'''
a__ : int = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCAmelCase_( a__ , a__ , a__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = {}
if isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : int = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE : Optional[Any] = model
SCREAMING_SNAKE_CASE : int = '''MobilenetV1/Conv2d_0/'''
SCREAMING_SNAKE_CASE : int = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE : Union[str, Any] = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE : Union[str, Any] = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE : Optional[Any] = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = i + 1
SCREAMING_SNAKE_CASE : int = i * 2
SCREAMING_SNAKE_CASE : int = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
SCREAMING_SNAKE_CASE : int = pointer.convolution.weight
SCREAMING_SNAKE_CASE : Union[str, Any] = pointer.normalization.bias
SCREAMING_SNAKE_CASE : str = pointer.normalization.weight
SCREAMING_SNAKE_CASE : int = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE : Any = pointer.normalization.running_var
SCREAMING_SNAKE_CASE : int = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE : Any = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
SCREAMING_SNAKE_CASE : List[Any] = pointer.convolution.weight
SCREAMING_SNAKE_CASE : Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE : Tuple = pointer.normalization.weight
SCREAMING_SNAKE_CASE : List[Any] = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE : str = pointer.normalization.running_var
if isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : Dict = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
SCREAMING_SNAKE_CASE : Dict = model.classifier.weight
SCREAMING_SNAKE_CASE : Union[str, Any] = model.classifier.bias
return tf_to_pt_map
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE : Optional[int] = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE : Optional[Any] = _build_tf_to_pytorch_map(a__ , a__ , a__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
SCREAMING_SNAKE_CASE : Any = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
SCREAMING_SNAKE_CASE : str = np.transpose(a__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE : List[Any] = array.squeeze().transpose()
else:
SCREAMING_SNAKE_CASE : Tuple = np.transpose(a__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(a__ )
tf_weights.pop(a__ , a__ )
tf_weights.pop(name + '''/RMSProp''' , a__ )
tf_weights.pop(name + '''/RMSProp_1''' , a__ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , a__ )
logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
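# TF stores conv kernels as (H, W, in, out) and depthwise kernels as
# (H, W, channels, multiplier); the transposes above convert both to PyTorch's
# (out, in, H, W) layout before copying the values in.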
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = features.shape[-2:]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = conv_layer.stride
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = max(kernel_height - stride_height , 0 )
else:
SCREAMING_SNAKE_CASE : List[str] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = max(kernel_width - stride_width , 0 )
else:
SCREAMING_SNAKE_CASE : List[str] = max(kernel_width - (in_width % stride_width) , 0 )
SCREAMING_SNAKE_CASE : int = pad_along_width // 2
SCREAMING_SNAKE_CASE : Any = pad_along_width - pad_left
SCREAMING_SNAKE_CASE : Dict = pad_along_height // 2
SCREAMING_SNAKE_CASE : Dict = pad_along_height - pad_top
SCREAMING_SNAKE_CASE : Any = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(a__ , a__ , '''constant''' , 0.0 )
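# This reproduces TensorFlow "SAME" padding. Worked example: in_height=7, stride=2,
# kernel=3 -> 7 % 2 = 1, so pad_along_height = max(3 - 1, 0) = 2, split as pad_top=1,
# pad_bottom=1; the convolution then yields floor((7 + 2 - 3) / 2) + 1 = ceil(7 / 2) = 4
# output rows, matching TF.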
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = 1 , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = True , ) ->None:
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
SCREAMING_SNAKE_CASE : List[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
SCREAMING_SNAKE_CASE : Tuple = nn.Convad(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=_lowerCamelCase , stride=_lowerCamelCase , padding=_lowerCamelCase , groups=_lowerCamelCase , bias=_lowerCamelCase , padding_mode='''zeros''' , )
if use_normalization:
SCREAMING_SNAKE_CASE : List[Any] = nn.BatchNormad(
num_features=_lowerCamelCase , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=_lowerCamelCase , track_running_stats=_lowerCamelCase , )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
if use_activation:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE : int = config.hidden_act
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
def __lowerCAmelCase ( self , _lowerCamelCase ) ->torch.Tensor:
if self.config.tf_padding:
SCREAMING_SNAKE_CASE : Any = apply_tf_padding(_lowerCamelCase , self.convolution )
SCREAMING_SNAKE_CASE : Optional[int] = self.convolution(_lowerCamelCase )
if self.normalization is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.normalization(_lowerCamelCase )
if self.activation is not None:
SCREAMING_SNAKE_CASE : Any = self.activation(_lowerCamelCase )
return features
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = MobileNetVaConfig
__SCREAMING_SNAKE_CASE : Dict = load_tf_weights_in_mobilenet_va
__SCREAMING_SNAKE_CASE : List[str] = 'mobilenet_v1'
__SCREAMING_SNAKE_CASE : Dict = 'pixel_values'
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->None:
if isinstance(_lowerCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
a__ : Any = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a__ : Optional[Any] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , a__ , )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = True ) ->List[Any]:
super().__init__(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = config
SCREAMING_SNAKE_CASE : int = 32
SCREAMING_SNAKE_CASE : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
SCREAMING_SNAKE_CASE : int = MobileNetVaConvLayer(
_lowerCamelCase , in_channels=config.num_channels , out_channels=_lowerCamelCase , kernel_size=3 , stride=2 , )
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList()
for i in range(13 ):
SCREAMING_SNAKE_CASE : Tuple = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=_lowerCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=1 , ) )
SCREAMING_SNAKE_CASE : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
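# Architecture note: each of the 13 stages above is a depthwise-separable block,
# i.e. a 3x3 depthwise conv (groups equal to the input channels) followed by a 1x1
# pointwise conv; channel width doubles at every stride-2 entry in `strides` (and at
# the first stage), scaled by depth_multiplier and floored at min_depth.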
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
raise NotImplementedError
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) ->Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
SCREAMING_SNAKE_CASE : Dict = self.conv_stem(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
SCREAMING_SNAKE_CASE : Dict = layer_module(_lowerCamelCase )
if output_hidden_states:
SCREAMING_SNAKE_CASE : Dict = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : Dict = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE : Tuple = torch.flatten(self.pooler(_lowerCamelCase ) , start_dim=1 )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=_lowerCamelCase , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , a__ , )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->None:
super().__init__(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = config.num_labels
SCREAMING_SNAKE_CASE : List[Any] = MobileNetVaModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE : Any = nn.Dropout(config.classifier_dropout_prob , inplace=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = nn.Linear(_lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) ->Union[tuple, ImageClassifierOutputWithNoAttention]:
SCREAMING_SNAKE_CASE : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : str = self.mobilenet_va(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE : List[Any] = self.classifier(self.dropout(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE : Dict = '''single_label_classification'''
else:
SCREAMING_SNAKE_CASE : List[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE : Optional[Any] = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : Tuple = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE : str = loss_fct(_lowerCamelCase , _lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE : Optional[int] = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE : Optional[Any] = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE : Optional[Any] = loss_fct(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
SCREAMING_SNAKE_CASE : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states , )
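# Loss dispatch above follows the standard HF convention: "regression" -> MSELoss,
# "single_label_classification" -> CrossEntropyLoss, "multi_label_classification" ->
# BCEWithLogitsLoss, with the problem type inferred from num_labels and the label
# dtype when it is not set explicitly in the config.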
| 313
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
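# I-BERT-specific fields: `quant_mode` switches the model to integer-only
# arithmetic, and `force_dequant` can selectively disable quantization for
# chosen operations (e.g. to debug accuracy regressions).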
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
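# Marking the batch (and choice/sequence) dimensions as dynamic axes lets the
# exported ONNX graph accept inputs of varying batch size and sequence length
# instead of being fixed to the shapes used at export time.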
| 313
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def UpperCAmelCase_( a__=None ):
"""simple docstring"""
if subparsers is not None:
SCREAMING_SNAKE_CASE : List[str] = subparsers.add_parser('''test''' )
else:
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=a__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=a__ )
return parser
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = script_name
else:
SCREAMING_SNAKE_CASE : List[str] = F"""--config_file={args.config_file} {script_name}"""
SCREAMING_SNAKE_CASE : str = ['''accelerate-launch'''] + test_args.split()
SCREAMING_SNAKE_CASE : Optional[int] = execute_subprocess_async(a__ , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = test_command_parser()
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
test_command(a__ )
if __name__ == "__main__":
main()
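# Usage: `accelerate test` (optionally with --config_file) runs the bundled sanity
# script through `accelerate-launch` and reports success if the subprocess exits
# cleanly, verifying the distributed setup end to end.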
| 313
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
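# For ONNX export, random PIL images are generated and run through the image
# processor; for ImageGPT this yields `input_ids` (per-pixel color-cluster tokens)
# rather than float pixel tensors, so the dummy inputs match the model's real inputs.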
| 313
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ : int = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
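# Lazy-import pattern: at runtime the module is replaced by a _LazyModule that only
# imports the torch/flax/tf submodules when their attributes are first accessed,
# while the TYPE_CHECKING branch above gives static type checkers the real symbols.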
| 313
|
from maths.prime_check import is_prime
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a__ )
if is_prime(a__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
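# Example: for number=5 both 5 and 7 are prime, so the function returns 7 (the
# larger member of the twin-prime pair); for number=4 it returns -1.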
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
SCREAMING_SNAKE_CASE : Dict = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE : Dict = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
SCREAMING_SNAKE_CASE : Dict = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Optional[int]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Optional[Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : List[Any] = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : str = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : List[str] = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
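# Comparing summed pixel values with a small delta, rather than exact arrays,
# verifies that the processor delegates image handling to the image processor
# while staying robust to dtype and rounding differences.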
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = '''lower newer'''
SCREAMING_SNAKE_CASE : str = processor(text=_lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : int = tokenizer(_lowerCamelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : str = self.get_image_processor()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = '''lower newer'''
SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : int = '''google/owlvit-base-patch32'''
SCREAMING_SNAKE_CASE : int = OwlViTProcessor.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = ['''cat''', '''nasa badge''']
SCREAMING_SNAKE_CASE : List[str] = processor(text=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Tuple = '''google/owlvit-base-patch32'''
SCREAMING_SNAKE_CASE : Any = OwlViTProcessor.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = [['''cat''', '''nasa badge'''], ['''person''']]
SCREAMING_SNAKE_CASE : int = processor(text=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = 16
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = max([len(_lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = '''google/owlvit-base-patch32'''
SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = ['''cat''', '''nasa badge''']
SCREAMING_SNAKE_CASE : int = processor(text=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = 16
SCREAMING_SNAKE_CASE : str = inputs['''input_ids''']
SCREAMING_SNAKE_CASE : Tuple = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : str = processor(images=_lowerCamelCase , query_images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 255.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.85 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
from __future__ import annotations
import numpy as np
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return np.maximum(0 , a__ )
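# np.maximum broadcasts the scalar 0 against the input, so relu works on
# vectors and matrices alike, e.g. relu(np.array([[-2.0, 3.0], [4.0, -5.0]])).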
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(None )
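# Passing ``None`` restores full (unsliced) attention; ``"auto"`` above computes
# attention in slices of half the attention heads, trading a little speed for a
# lower peak-memory footprint.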
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0
if start < end:
SCREAMING_SNAKE_CASE : List[str] = randint(a__ , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = a[end]
SCREAMING_SNAKE_CASE : Dict = a[pivot]
SCREAMING_SNAKE_CASE : Optional[Any] = temp
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = _in_place_partition(a__ , a__ , a__ )
count += _in_place_quick_sort(a__ , a__ , p - 1 )
count += _in_place_quick_sort(a__ , p + 1 , a__ )
return count
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = randint(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = a[end]
SCREAMING_SNAKE_CASE : Dict = a[pivot]
SCREAMING_SNAKE_CASE : Optional[int] = temp
SCREAMING_SNAKE_CASE : int = start - 1
for index in range(a__ , a__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE : Optional[Any] = new_pivot_index + 1
SCREAMING_SNAKE_CASE : Optional[Any] = a[new_pivot_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = a[index]
SCREAMING_SNAKE_CASE : str = temp
SCREAMING_SNAKE_CASE : Optional[int] = a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE : Tuple = a[end]
SCREAMING_SNAKE_CASE : List[str] = temp
return new_pivot_index + 1, count
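# Both helpers use a randomized Lomuto partition: a random index is swapped to
# the end to serve as the pivot, elements smaller than the pivot are moved left
# of new_pivot_index, and the pivot is finally swapped into slot
# new_pivot_index + 1. `count` tracks the number of comparisons performed.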
a__ : List[Any] = TemporaryFile()
a__ : Tuple = 100 # 100 elements are to be sorted
a__ , a__ : Optional[int] = 0, 1 # mean and standard deviation
a__ : int = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a__ : str = np.load(outfile)
a__ : Optional[int] = len(M) - 1
a__ : List[Any] = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal '''
'''distribution is :'''
)
print(z)
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.float32
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.int32 )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
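# With the class defaults above (num_attention_heads=None, attention_head_dim=8)
# this resolves to 8 attention heads for every block.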
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.int32 )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
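# This per-resolution residual summation is how ControlNet-style conditioning
# enters the UNet: each additional residual matches one down-block feature map
# and is added element-wise before the skip connections feed the up blocks.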
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )