"""
Project Euler Problem 174: https://projecteuler.net/problem=174

Hollow square laminae: count how many tile totals t <= 1,000,000 can be
arranged as a hollow square lamina in between 1 and 10 distinct ways.
"""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
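As a sanity check on the counting logic, a direct enumeration over (outer, hole) pairs should agree for small tile budgets. This is a sketch added here for illustration, not part of the original solution:

def solution_brute_force(t_limit: int = 1000, n_limit: int = 10) -> int:
    counts: defaultdict = defaultdict(int)
    for outer_width in range(3, t_limit):
        # walk holes from widest to narrowest so tile counts increase monotonically
        for hole_width in range(outer_width - 2, 0, -2):
            tiles = outer_width * outer_width - hole_width * hole_width
            if tiles > t_limit:
                break
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= n_limit)


assert solution_brute_force(1000) == solution(1000)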
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # octal digits are accumulated as a base-10 integer
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print conversions for a few example values."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
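Since the function returns the same "0o"-prefixed string format as Python's built-in oct(), a minimal cross-check (added here, not in the original file) is:

for n in (2, 8, 65, 216, 512, 4095):
    assert decimal_to_octal(n) == oct(n), n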
"""Regular-expression matching (supporting '.' and '*') with bottom-up dynamic programming."""


def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True if `input_string` fully matches `pattern`, where '.' matches
    any single character and '*' matches zero or more of the preceding element."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
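A few more illustrative calls, added for clarity; each follows directly from the DP recurrence above:

assert match_pattern("aab", "c*a*b")   # 'c*' matches zero 'c's, 'a*' matches "aa"
assert match_pattern("aaa", "a.a")     # '.' matches any single character
assert not match_pattern("aaa", "aa")  # the whole string must be consumed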
"""Image/text processor class for AltCLIP (a CLIP image processor paired with an XLM-Roberta tokenizer)."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
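A minimal usage sketch; the `BAAI/AltCLIP` checkpoint name is an assumption on my part, and any checkpoint registered with this processor class should work the same way:

from transformers import AltCLIPProcessor
from PIL import Image

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values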
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices (the recursion base case)."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sided square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply square matrices whose side length is a power of two."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # pad copies so the input matrices are not mutated
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
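To make the demo self-checking, the result can be compared against a naive triple-loop product. This helper is a sketch added here, not part of the original file:

def naive_multiply(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]


# e.g. under the __main__ guard above:
# assert strassen(matrix1, matrix2) == naive_multiply(matrix1, matrix2)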
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is a valid dotted-quad IPv4 address
    (four numeric octets, each in the range 0-255)."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
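A few illustrative checks, added for clarity:

assert is_ip_v4_address_valid("192.168.0.23")
assert is_ip_v4_address_valid("128.255.0.1")
assert not is_ip_v4_address_valid("12.34.56.789")  # 789 is out of range
assert not is_ip_v4_address_valid("3.9.")          # too few octets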
"""Bead sort (gravity sort); only works for sequences of non-negative integers.

https://en.wikipedia.org/wiki/Bead_sort
"""


def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the "beads" fall: move the excess down one rod
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
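Because each adjacent "bead transfer" above is arithmetically equivalent to a swap, the routine behaves like bubble sort. A randomized cross-check against the built-in sort, added here for illustration:

import random

data = [random.randint(0, 100) for _ in range(50)]
assert bead_sort(data.copy()) == sorted(data)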
"""Kosaraju's algorithm for finding the strongly connected components of a directed graph."""

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to sort the vertices of `graph` by exit time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """Use depth-first search to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components of `graph`."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
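Running the algorithm on the two test graphs defined at the top of the file, a usage sketch added for illustration:

if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]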
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
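In practice this script is launched through the accelerate CLI, for example `accelerate launch cross_validation.py --num_folds 5` (the file lives under examples/by_feature/ in the accelerate repository); `accelerate config` selects between single/multi-GPU, TPU, and mixed-precision modes without changing the code.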
from ..utils import DummyObject, requires_backends


# NOTE: the original class name was lost in obfuscation; `LMSDiscreteScheduler` is the
# object defined in diffusers' dummy torch-and-scipy module, which this file matches.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
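Dummy objects like this let the parent package import cleanly when optional backends are missing; only touching the class raises. A sketch of the behaviour, assuming torch/scipy are not installed:

try:
    LMSDiscreteScheduler()  # requires_backends raises ImportError here
except ImportError as err:
    print(err)  # the message explains which extra dependency to install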
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring, found in linear time with
    Manacher's algorithm."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
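Example outputs, added for illustration:

assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"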
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""PyTorch learning-rate schedulers with a unified `get_scheduler` entry point."""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multipliers, e.g. step_rules="1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay down to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine decay with several hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`, after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
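A quick usage sketch with a toy optimizer (the values are arbitrary, chosen for illustration only):

import torch

params = [torch.nn.Parameter(torch.zeros(3))]
optimizer = torch.optim.SGD(params, lr=0.1)
scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)

for _ in range(100):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # ~0.0 after the full cosine decay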
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
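Thanks to `_LazyModule`, users import from the top-level package and the submodule is only loaded on first attribute access. A usage sketch (assumes a transformers installation with torch):

from transformers import SEWConfig, SEWModel

config = SEWConfig()      # default configuration
model = SEWModel(config)  # randomly initialised SEW encoder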
"""Functions for downloading and reading MNIST data (deprecated TensorFlow helpers)."""
import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`.  Seed arg provides for convenient
        deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
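Typical (deprecated) usage; the directory below is an arbitrary choice, and the four archives are downloaded to it on first call:

mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
images, labels = mnist.train.next_batch(64)
print(images.shape, labels.shape)  # (64, 784) (64, 10)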
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """Logger adapter that, by default, only logs on the main process."""

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
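Intended usage, mirroring the accelerate documentation; an `Accelerator` or `PartialState` must be constructed first, otherwise `log` raises the RuntimeError above:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__)
logger.info("printed once, on the main process only")
logger.info("printed on every process", main_process_only=False)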
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline: predicts the class of a video."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float tensor of the given shape as nested lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch.

    The target parameter names below are reconstructed to follow the standard
    Transformers T5/UMT5 state-dict layout, since only the right-hand sides
    survived in the source of this file.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
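
# Illustrative invocation sketch (the script name and all paths below are
# placeholders, not taken from the original file):
#
#     python convert_umt5_checkpoint.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output \
#         --scalable_attention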
"""simple docstring"""
def count_inversions_bf(arr):
    """Counts inversions by brute force, in O(n^2) time."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions by divide and conquer, in O(n log n) time."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merges two sorted lists and counts the inversions across them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion
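
# Worked example (illustrative, not from the original file): for p = [3, 5] and
# q = [1, 4], the merge first compares 3 > 1 and counts len(p) - i = 2 inversions
# ((3, 1) and (5, 1)), then later compares 5 > 4 and counts 1 more ((5, 4)),
# returning r = [1, 3, 4, 5] and num_inversion = 3.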
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
main()
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Performs a runtime check of a dependency version, using the same syntax as pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
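
# Usage sketch (illustrative; the requirement strings below are examples, not
# from the original file). `require_version` raises ImportError when the
# installed package does not satisfy the pip-style specifier:
#
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
#     require_version_core("torch>=1.9")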
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the Pythagorean theorem to solve for the one of resistance, reactance,
    or impedance that is passed as 0; exactly one argument must be 0.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
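
# Examples (illustrative): exactly one argument is 0 and gets solved for.
#
#     >>> electrical_impedance(3, 4, 0)
#     {'impedance': 5.0}
#     >>> electrical_impedance(3, 0, 5)
#     {'reactance': 4.0}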
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
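
# Usage sketch (illustrative, mirroring the slow test above; the checkpoint and
# image URL are the ones already used in `test_large_model_pt`):
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"]            # a PIL.Image.Image
#     outputs["predicted_depth"]  # a torch.Tensor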
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
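# Editor's note (added, not from the original test suite): with the tester defaults
# above (image_size=32, output_stride=32), the five hidden states shrink stage by
# stage to 16, 8, 4, 2 and finally 32 // 32 = 1 pixel per side, which is exactly
# what test_hidden_states_output and create_and_check_model assert.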
def interpolation_search(sorted_collection, item):
    """Search for `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
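# Editor's worked example (added): for sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93]
# and item = 66, the first probe is
#     point = 0 + (66 - 10) * (7 - 0) // (93 - 10) = 392 // 83 = 4,
# i.e. index 4 (value 50) -- further right than a binary search midpoint, which is
# the payoff of interpolation search when values are roughly uniformly spaced.
assert 0 + (66 - 10) * (7 - 0) // (93 - 10) == 4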
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive variant; `left` and `right` bound the slice currently searched."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, point)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError unless `collection` is sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
"""Functions for downloading and reading MNIST data (deprecated).

This module and all its submodules are deprecated.
"""

import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
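# Editor's example (added): the IDX files parsed below start with a big-endian
# uint32 magic number, e.g. b"\x00\x00\x08\x03" decodes to 2051 for image files:
#
#     import io
#     assert _read32(io.BytesIO(b"\x00\x00\x08\x03")) == 2051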
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
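# Editor's example (added): labels [1, 0] with num_classes=3 become
#     [[0., 1., 0.],
#      [1., 0., 0.]]
# via flat indexing: index_offset [0, 3] + labels [1, 0] marks flat positions 1 and 3.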
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container class for a _DataSet (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
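    # Editor's usage sketch (added): the branching above can be exercised without
    # any download by constructing a fake dataset --
    #
    #     ds = _DataSet([], [], fake_data=True, one_hot=True)
    #     images, labels = ds.next_batch(5, fake_data=True)
    #     assert len(images) == len(labels) == 5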
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
import math
import random


def sigmoid_function(value, deriv=False):
    """Return the sigmoid of `value`, or its derivative when `deriv` is True
    (in which case `value` must already be a sigmoid output)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02
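# Editor's note (added): `deriv=True` relies on the identity s'(x) = s(x) * (1 - s(x)),
# so it must be fed an already-squashed activation, never the raw input. Quick check:
_s = sigmoid_function(0.5)
assert abs(sigmoid_function(_s, deriv=True) - _s * (1 - _s)) < 1e-12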
def forward_propagation(expected, number_propagations):
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlbertModel(config=A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(A__ , attention_mask=A__ , token_type_ids=A__)
SCREAMING_SNAKE_CASE_ : str = model(A__ , token_type_ids=A__)
SCREAMING_SNAKE_CASE_ : Any = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : str , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = AlbertForPreTraining(config=A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , sentence_order_label=A__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = AlbertForMaskedLM(config=A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlbertForQuestionAnswering(config=A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.num_labels
SCREAMING_SNAKE_CASE_ : str = AlbertForSequenceClassification(A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : int = AlbertForTokenClassification(config=A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[Any] = AlbertForMultipleChoice(config=A__)
model.to(A__)
model.eval()
SCREAMING_SNAKE_CASE_ : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE_ : int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE_ : Dict = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = super()._prepare_for_class(A__ , A__ , return_labels=A__)
if return_labels:
if model_class in get_values(A__):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A__)
SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A__)
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = AlbertModelTester(self)
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=A__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type
self.model_tester.create_and_check_model(*A__)
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Dict = AlbertModel.from_pretrained(A__)
self.assertIsNotNone(A__)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = AlbertModel.from_pretrained('''albert-base-v2''')
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(A__ , attention_mask=A__)[0]
SCREAMING_SNAKE_CASE_ : List[str] = torch.Size((1, 11, 768))
self.assertEqual(output.shape , A__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1e-4))
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames the fairseq MusicGen state dict to HF module names and splits out
    the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
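# Editor's sketch (added): the fused `in_proj_weight` stacks the query, key and value
# projections along dim 0, so a (3 * hidden_size, hidden_size) matrix splits as
#
#     w = torch.arange(12.0).reshape(6, 2)          # toy hidden_size = 2
#     q, k, v = w[:2, :], w[2:4, :], w[-2:, :]      # same slicing as above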
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
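# Editor's usage sketch (added, hypothetical values):
#
#     config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
#     assert config.hidden_size == 64   # resolved through attribute_map above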
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
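# Editor's check (added): sizes are rounded *up* to the next multiple of
# scale_factor**2 before rescaling, so with the default scale_factor=8:
#
#     assert downscale_height_and_width(768, 768) == (96, 96)
#     assert downscale_height_and_width(765, 768) == (96, 96)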
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
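    # Editor's note (added): e.g. num_inference_steps=100 with strength=0.3 keeps only
    # the final 30 scheduler timesteps, so the encoded init image is noised to roughly
    # 30% of the full schedule before denoising starts.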
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__snake_case = logging.get_logger(__name__)
__snake_case = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
        has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True,
        input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
            hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the QA/sequence-classification heads
        # and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
return inputs
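# Hedged usage sketch (assumes a recent `transformers` install; the names match
# the classes defined above, and the values shown are just the defaults):
#
#     config = LayoutLMv3Config()
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     list(onnx_config.inputs)
#     # ['input_ids', 'attention_mask', 'bbox', 'pixel_values']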
| 357
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit_generator` was removed in recent TensorFlow releases; `fit` accepts
    # generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # Match the training-time rescaling before predicting.
    test_image = np.expand_dims(test_image, axis=0) / 255.0
    result = classifier.predict(test_image)
    # training_set.class_indices
    # `predict` returns a probability, so threshold it rather than comparing
    # against exact 0/1 values.
    if result[0][0] < 0.5:
        prediction = "Normal"
    else:
        prediction = "Abnormality detected"
| 169
| 0
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Convert to cubic meters first, then from cubic meters to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
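    # Hedged usage examples (the values follow from the conversion table above):
    print(volume_conversion(4, "cubicmeter", "litre"))  # 4000
    print(volume_conversion(1, "litre", "gallon"))  # ~0.264172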
| 54
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
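    # Hedged usage example: -5 in 4-bit two's complement is 1011.
    print(twos_complement(-5))  # 0b1011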
| 54
| 1
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
| 353
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
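# Hedged usage sketch (downloads the checkpoint, so network access is assumed):
#
#     tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     tok.build_inputs_with_special_tokens([5, 6])
#     # -> [tok.cls_token_id, 5, 6, tok.sep_token_id]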
| 86
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
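# Hedged usage sketch (downloads the checkpoint, so network access is assumed):
#
#     tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     tok.get_special_tokens_mask([5, 6, 7])
#     # -> [1, 0, 0, 0, 1]  (CLS and SEP positions marked with 1)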
| 261
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
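# Hedged usage note: `_LazyModule` defers the heavy torch import until an
# attribute is first touched, e.g. (module path may differ across releases,
# since VAN has since been moved under `deprecated`):
#
#     from transformers import VanModel  # triggers the real import lazily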
| 261
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
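# Hedged example invocation (the script filename and model path are illustrative):
#
#     python convert_vae_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx \
#         --opset 14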
| 61
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy, so the next pass reads this row's values instead of aliasing
        # the buffer it is about to overwrite.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
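    # The four implementations should agree; a quick cross-check:
    print(largest_square_area_in_matrix_top_down_approch(2, 2, [[1, 1], [1, 1]]))  # 2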
| 61
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 97
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """simple docstring"""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """simple docstring"""
    return (value) * (1 - (value))


def example() -> int:
    """simple docstring"""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
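    # Hedged note: the target above is the 3-bit parity (XOR) function, so the
    # ideal prediction for [1, 1, 1] is 1; with only 10 training iterations and
    # random initial weights the actual output may vary between runs.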
| 318
| 0
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
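    # Hedged example of the output format for a smaller table:
    print(multiplication_table(number=3, number_of_terms=3))
    # 3 * 1 = 3
    # 3 * 2 = 6
    # 3 * 3 = 9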
| 350
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
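    # Hedged usage examples: ties return every mode, sorted.
    print(mode([2, 2, 3]))  # [2]
    print(mode([1, 1, 2, 2]))  # [1, 2]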
| 141
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """simple docstring"""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
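    # Hedged usage example: solve for the missing quantity (the zero argument).
    print(electric_power(voltage=0, current=2, power=5))
    # result(name='voltage', value=2.5)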
| 47
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 169
| 0
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
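# Hedged illustration of the URL shape the test above asserts:
#
#     hf_hub_url(repo_id="org-name/dataset-name", path="filename.csv")
#     # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename.csv"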
| 360
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 142
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
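
# Helper sketch (not part of the original test): the integration test above turns
# an RGB PIL image into the ControlNet `hint` tensor by scaling pixel values to
# [0, 1] and moving to NCHW layout; the same steps as a standalone function:
def _image_to_hint(pil_image):
    hint = torch.from_numpy(np.array(pil_image)).float() / 255.0  # HWC uint8 -> HWC float in [0, 1]
    return hint.permute(2, 0, 1).unsqueeze(0)  # HWC -> 1xCxHxW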
| 90
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
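

# A reusable version of the ratio-based comparison above (a sketch, not part of
# the original test suite): dividing instead of subtracting keeps the tolerance
# meaningful when values span many orders of magnitude, as MobileBERT's do.
def _assert_relative_close(expected, actual, tolerance=TOLERANCE):
    ratio = expected / actual
    assert bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))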
| 86
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention_forwardGenerator_pass = False
@property
    def text_embedder_hidden_size(self):
        return 32
@property
    def time_input_dim(self):
        return 32
@property
    def block_out_channels_a(self):
        return self.time_input_dim
@property
    def time_embed_dim(self):
        return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
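

# Aside (illustrative only, not part of the original test): the dummy UNet above
# predicts 8 output channels for 4 latent channels because it returns a mean and
# a variance per channel; consumers typically split the prediction in two along
# the channel dimension, e.g.:
#
#   noise_pred, variance_pred = unet_output.chunk(2, dim=1)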
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 145
|
def combination_util(arr, n, r, index, data, i):
    """Recursively fill ``data`` with every combination of size ``r`` from ``arr``."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combinations using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
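
# For reference, the standard library enumerates the same r-sized combinations
# (a sketch, not part of the original snippet):
#
#   from itertools import combinations
#   for combo in combinations([10, 20, 30, 40, 50], 3):
#       print(*combo)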
| 145
| 1
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
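
# Quick illustration (not from the original file): the helper above derives the
# attention mask from the pad token when the caller does not supply one, e.g.
# with pad_token_id=1:
#
#   torch.tensor([[5, 7, 1, 1]]).ne(1)  ->  tensor([[True, True, False, False]])
#
# so padded positions are masked out automatically.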
class MaMaaaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_a = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_a = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
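

# Note on the clamp trick used in prepare_config_and_inputs above (illustrative,
# not from the original file): input_ids.clamp(pad_token_id + 1) maps every
# token id <= pad_token_id up to pad_token_id + 1, e.g. with pad_token_id=1:
#
#   torch.tensor([[0, 1, 5, 2]]).clamp(2)  ->  tensor([[2, 2, 5, 2]])
#
# which guarantees the random test inputs contain no pad (or bos) ids, matching
# the position-id assumptions described in that method's comments.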
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fp16 = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_a, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_tokenizer(self):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 61
|
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search from ``start``; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
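

# Recursive counterpart to the iterative version above (an illustrative sketch,
# not part of the original module): the call stack replaces the explicit stack.
def depth_first_search_recursive(graph: dict, v: str, explored: set | None = None) -> set:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored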
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 61
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
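

# Sanity-check sketch for the q/k/v split above (hypothetical helper, not part
# of the original script): a fused qkv projection of shape (3*hidden, hidden)
# must yield three (hidden, hidden) blocks in query/key/value order.
def _check_qkv_split(in_proj_weight, hidden_size):
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)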
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT checkpoint's weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image_a = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_a = processor(image_a, text, return_tensors="pt")
        encoding_a = processor(image_a, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_a.input_ids,
            pixel_values=encoding_a.pixel_values,
            pixel_values_a=encoding_a.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__A : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
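
# Example invocation (script name and local path are illustrative):
#   python convert_vilt_original_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm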
| 355
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype )
else:
snake_case_ : Optional[int] = self.norma(_UpperCamelCase )
snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case_ : Union[str, Any] = self.attna(
_UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
if self.use_ada_layer_norm_zero:
snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output
snake_case_ : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case_ : Any = (
self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase )
)
snake_case_ : List[Any] = self.attna(
_UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
snake_case_ : Tuple = attn_output + hidden_states
# 3. Feed-forward
snake_case_ : Optional[Any] = self.norma(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case_ : int = torch.cat(
[self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
snake_case_ : List[str] = self.ff(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case_ : Any = ff_output + hidden_states
return hidden_states
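
# --- Illustrative sketch (added for clarity, not part of the original module) ---
# The forward-chunking branch above trades scheduling for peak memory: the
# feed-forward runs on slices of the chunked dimension and the results are
# concatenated. The toy module and shapes below are assumptions for the demo.
#
#   import torch
#   toy_ff = torch.nn.Sequential(torch.nn.Linear(64, 256), torch.nn.GELU(), torch.nn.Linear(256, 64))
#   hidden = torch.randn(2, 8, 64)   # (batch, seq, dim); seq divisible by chunk size 4
#   num_chunks = hidden.shape[1] // 4
#   chunked = torch.cat([toy_ff(h) for h in hidden.chunk(num_chunks, dim=1)], dim=1)
#   assert torch.allclose(chunked, toy_ff(hidden), atol=1e-6)
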
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation with an optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant: projects to 2 * dim_out and gates one half with GELU of the other."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
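
# Sketch of the GEGLU gating above in isolation (dimensions are assumptions for
# illustration only):
#   import torch
#   proj = torch.nn.Linear(16, 2 * 16)
#   h, gate = proj(torch.randn(4, 16)).chunk(2, dim=-1)
#   out = h * torch.nn.functional.gelu(gate)   # shape (4, 16)
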
class ApproximateGELU(nn.Module):
    r"""Fast GELU approximation: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero), conditioned on timestep and class label."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
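
# Sketch of the FiLM-style modulation used by the adaptive norms above:
# normalize, then apply x * (1 + scale) + shift. Shapes are assumptions chosen
# for illustration:
#   import torch
#   x = torch.randn(2, 8, 4, 4)                      # (batch, channels, h, w)
#   scale, shift = torch.randn(2, 8), torch.randn(2, 8)
#   normed = torch.nn.functional.group_norm(x, 4)
#   out = normed * (1 + scale[:, :, None, None]) + shift[:, :, None, None]
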
'''simple docstring'''
from copy import deepcopy
class BinaryIndexedTree:
    """Fenwick tree supporting prefix sums, point updates and rank queries."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
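
# Example usage of the tree above (values verified by hand):
#   bit = BinaryIndexedTree([1, 2, 3, 4, 5])
#   bit.prefix(3)    # -> 6, i.e. 1 + 2 + 3
#   bit.add(1, 10)   # underlying array becomes [1, 12, 3, 4, 5]
#   bit.query(0, 2)  # -> 13, i.e. 1 + 12
#   bit.get_array()  # -> [1, 12, 3, 4, 5]
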
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the responding (winning) weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning vector `j` toward the sample by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
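
# For illustration, mirroring the assertion above (the expected URL is an
# assumption derived from that f-string; `quote` percent-encodes the blanks):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"
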
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_filename_is_truncated(tmpdir):
    filename = 'a' * 1_0_0_0 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_5_5
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
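
# Minimal usage sketch of the FileLock API exercised above (the guarded file
# name is an assumption for illustration):
#   lock = FileLock('resource.txt.lock')
#   with lock.acquire(timeout=1):   # raises Timeout if another process holds it
#       ...                         # exclusive access to the guarded resource
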
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
lowercase_ = "</w>"
lowercase_ = "@@ "
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word (sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
lowercase_ = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (decoding-only unless a merges file is provided)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.')
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')
        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding.'
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.')
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens)
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merges_file)
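
# Detokenization sketch for the "@@ " continuation marker handled in
# `convert_tokens_to_string` above (strings chosen for illustration):
#   ' '.join(['hell@@', 'o', 'wor@@', 'ld'])      # -> 'hell@@ o wor@@ ld'
#   ''.join('hell@@ o wor@@ ld'.split('@@ '))     # -> 'hello world'
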
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
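
# Example usage of the queue above (walking through the fixed implementation):
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue('a')
#   queue.enqueue('b')
#   queue.first()    # -> 'a'
#   queue.dequeue()  # -> 'a'
#   queue.dequeue()  # -> 'b'
#   queue.dequeue()  # raises Exception('Empty Queue')
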
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding='max_length', max_length=64)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
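
# Usage sketch for the processor above (the checkpoint name and image file are
# assumptions for illustration):
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#   inputs = processor(text=['a photo of a cat'], images=Image.open('cat.png'), return_tensors='pt')
#   # inputs now holds input_ids, attention_mask and pixel_values
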
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(config, state_dict):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
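
# The row-wise split performed above, shown in isolation (shapes are
# assumptions for illustration):
#   import torch
#   hidden = 64
#   kv_weight = torch.randn(2 * hidden, hidden)   # fused keys + values
#   k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
#   kv_bias = torch.randn(2 * hidden)
#   k_bias, v_bias = kv_bias[:hidden], kv_bias[hidden:]
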
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Liouville's lambda: 1 if `number` has an even count of prime factors
    (with multiplicity), -1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
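
# Example values of the function above: the sign tracks the parity of the
# number of prime factors counted with multiplicity:
#   liouville_lambda(1)  # -> 1   (empty factorization, even)
#   liouville_lambda(4)  # -> 1   (2 * 2, two factors)
#   liouville_lambda(8)  # -> -1  (2 * 2 * 2, three factors)
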
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d): {'name': str(d), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={'trust_env': True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip('/'))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/'))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out)
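
# Usage sketch (the DatasetInfo object and file name are assumptions; this
# legacy filesystem only supports reading dataset repositories):
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   fs.ls('')                          # list top-level entries of the repo
#   with fs.open('data/train.csv') as f:
#       header = f.readline()
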
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_84,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_28,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers (start logits and end logits)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
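# Note (added context, not from the original script): for SQuAD-style evaluation,
# `metric.compute` returns a dict roughly of the form {"exact_match": ..., "f1": ...}
# (squad_v2 adds no-answer related keys); the actual values depend on the checkpoint
# and dataset, so none are reproduced here.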
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameters should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
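# The alignment rule exercised above, in short (summary, not from the original module):
# `out_features` holds stage *names* while `out_indices` holds positions into
# `stage_names`; either one can be derived from the other, and both default to the
# final stage when neither is set.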
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the numbers below max_number that are the product of exactly two primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
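# Worked example (illustrative, not part of the original file): solution(30) uses the
# primes below 15, [2, 3, 5, 7, 11, 13], and counts pairs p <= q with p * q < 30:
# (2,2) (2,3) (2,5) (2,7) (2,11) (2,13) (3,3) (3,5) (3,7) (5,5) -> 10 semiprimes.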
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings, so byte-level
    BPE never operates on raw whitespace/control bytes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
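# A minimal illustration of the two helpers above (hypothetical REPL checks, not part
# of the original module): bytes_to_unicode() maps all 256 byte values to printable
# unicode characters, one-to-one, and get_pairs() enumerates candidate BPE merges.
#
#   mapping = bytes_to_unicode()
#   assert len(mapping) == 256 and len(set(mapping.values())) == 256
#   assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}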
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
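# A short usage sketch (assumes local vocab.json/merges.txt files; not part of the
# original module):
#
#   tokenizer = BartTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   ids = tokenizer("Hello world")["input_ids"]
#   text = tokenizer.decode(ids, skip_special_tokens=True)  # round-trips the input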
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def UpperCAmelCase_ ( self :int , lowercase_ :Optional[int]=0 , **lowercase_ :Any )-> int:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**lowercase_ )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
A__ = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self :List[Any] )-> Tuple:
pass
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :Optional[Any]=0 , **lowercase_ :List[str] )-> str:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
A__ = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self :Optional[int] , **lowercase_ :Dict )-> Optional[int]:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**lowercase_ )
A__ = scheduler_class(**lowercase_ )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
A__ = model(lowercase_ , lowercase_ )
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A__ = model(lowercase_ , lowercase_ )
A__ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCAmelCase_ ( self :int )-> Union[str, Any]:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
A__ = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
def UpperCAmelCase_ ( self :Any )-> Dict:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1 )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def UpperCAmelCase_ ( self :Optional[Any] )-> Any:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCAmelCase_ ( self :Optional[Any] )-> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] )-> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Optional[int]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> str:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A__ = 27
for scheduler_class in self.scheduler_classes:
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCAmelCase_ ( self :List[str] )-> str:
with self.assertRaises(lowercase_ ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase_ ( self :int )-> str:
A__ = self.full_loop()
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3
def UpperCAmelCase_ ( self :Optional[Any] )-> List[Any]:
A__ = self.full_loop(prediction_type="v_prediction" )
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1E-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.0_1 )
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1E-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3
def UpperCAmelCase_ ( self :Dict )-> Any:
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.0_1 )
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
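# Context for the assertions above (summary, not from the test module): PNDM runs a
# few Runge-Kutta (PRK) warm-up steps before switching to linear multistep (PLMS)
# steps, which is why both `step_prk` and `step_plms` are checked for every config.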
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : Tuple=7 , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Any=99 , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : int=4 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : int=None , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_multiple_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = weight_tying
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ = True
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = GPTNeoXJapaneseModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase__ = model(lowercase_ , attention_mask=lowercase_ )
UpperCAmelCase__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = GPTNeoXJapaneseModel(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase__ = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = GPTNeoXJapaneseForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = GPTNeoXJapaneseForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
UpperCAmelCase__ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
UpperCAmelCase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ = model(lowercase_ , attention_mask=lowercase_ , output_hidden_states=lowercase_ )
UpperCAmelCase__ = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase__ = model(
lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = GPTNeoXJapaneseModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase__ = None
self.model_tester.create_and_check_model_as_decoder(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = """abeja/gpt-neox-japanese-2.7b"""
UpperCAmelCase__ = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCAmelCase__ = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCAmelCase__ = GPTNeoXJapaneseTokenizer.from_pretrained(lowercase_ )
UpperCAmelCase__ = GPTNeoXJapaneseForCausalLM.from_pretrained(lowercase_ )
UpperCAmelCase__ = []
for prompt in prompts:
UpperCAmelCase__ = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
UpperCAmelCase__ = model.generate(lowercase_ , max_length=50 )
UpperCAmelCase__ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
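# A short usage sketch (hypothetical checkpoint and image path, not part of the
# original module):
#
#   from PIL import Image
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#   # -> input_ids, bbox, attention_mask, image; words/boxes come from built-in OCR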
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`, estimating each probe
    position from the value distribution instead of always bisecting."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point

        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure-recursion variant; `left` and `right` carry the current search bounds."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
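# Worked probe (illustrative, not part of the original file): searching 45 in
# [10, 30, 40, 45, 50, 66, 77, 93] first probes point = 0 + (45 - 10) * 7 // (93 - 10) = 2,
# finds 40 < 45, moves left to 3, and the next probe lands exactly on 45. This value-based
# position estimate is why interpolation search averages O(log log n) on uniform keys.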
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
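# Illustration of the renaming above on a toy entry (the key is hypothetical and
# real checkpoints hold tensors, not strings):
# convert_state_dict({"blocks.2.att.time_mix_k": "w"})
#   -> {"rwkv.blocks.2.attention.time_mix_key": "w"}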
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 122
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target text: lower-case it and strip the characters ignored during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
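# A hand-checked example of the normalization above (safe to remove):
# normalize_text("Hello, World!\nBye.")  ->  "hello world bye"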
def main(args):
    """Run ASR inference on the requested dataset split and log WER/CER."""
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
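# Example invocation (the model and dataset ids below are placeholders, not part
# of the original script):
# python eval.py --model_id <your-model> --dataset <dataset-name> \
#     --config <config> --split test --log_outputs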
| 14
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a DPTConfig (and the expected output shape) from the checkpoint URL."""
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop keys that have no counterpart in the HF implementation."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Map an original DPT state-dict key to its HF equivalent."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        # e.g. "neck.refinenet4..." becomes "neck.fusion_stage.layers.0..."
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv matrix into separate query, key and value entries."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Load the standard COCO cats image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Copy/paste/tweak the original DPT weights into the HF structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
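# Minimal sketch of the fused-qkv split performed by read_in_q_k_v above
# (illustrative sizes only; real weights come from the checkpoint):
#
#     import torch
#     hidden_size = 4
#     qkv = torch.arange(3 * hidden_size * 2.0).reshape(3 * hidden_size, 2)
#     q = qkv[:hidden_size]                   # first hidden_size rows
#     k = qkv[hidden_size : 2 * hidden_size]  # middle hidden_size rows
#     v = qkv[-hidden_size:]                  # last hidden_size rows
#     assert torch.equal(torch.cat([q, k, v]), qkv)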
| 234
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
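# Sketch of the special-token handling above (token ids are illustrative):
# build_inputs_with_special_tokens([182, 47]) -> [182, 47, eos_token_id]
# Pegasus adds no BOS token, and a (rare) pair is simple concatenation + EOS.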
| 234
| 1
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
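# Portability note (an aside, not part of the original script): ":" is not a
# legal character in Windows file names, so a cross-platform timestamp would be
# e.g. f"{datetime.now():%Y-%m-%d_%H-%M-%S}.jpg".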
| 111
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 111
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 362
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase_ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase_ = 'RegNetConfig'
# Base docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = 'tabby, tabby cat'
lowerCAmelCase_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = nn.Convad(
_snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,)
lowercase__ : List[Any] = nn.BatchNormad(_snake_case )
lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.convolution(_snake_case )
lowercase__ : Tuple = self.normalization(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowercase__ : str = config.num_channels
def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__ : Optional[int] = self.embedder(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case )
lowercase__ : Any = nn.BatchNormad(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.convolution(_snake_case )
lowercase__ : Optional[int] = self.normalization(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__ : Dict = nn.Sequential(
nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.pooler(_snake_case )
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[str] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width )
lowercase__ : str = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Optional[int] = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : str = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = hidden_state
lowercase__ : Union[str, Any] = self.layer(_snake_case )
lowercase__ : List[Any] = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : Optional[int] = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : List[str] = max(1 ,out_channels // config.groups_width )
lowercase__ : Tuple = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : str = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : str = hidden_state
lowercase__ : Optional[Any] = self.layer(_snake_case )
lowercase__ : int = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : str = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowercase__ : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.layers(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ):
self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Any = stage_module(_snake_case )
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = RegNetConfig
lowerCAmelCase : List[Any] = "regnet"
lowerCAmelCase : Optional[int] = "pixel_values"
lowerCAmelCase : Union[str, Any] = True
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
elif isinstance(_snake_case ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : str = value
lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Any = config
lowercase__ : List[str] = RegNetEmbeddings(_snake_case )
lowercase__ : Any = RegNetEncoder(_snake_case )
lowercase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.embedder(_snake_case )
lowercase__ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : str = encoder_outputs[0]
lowercase__ : Optional[int] = self.pooler(_snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : int = RegNetModel(_snake_case )
# classification head
lowercase__ : str = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier(_snake_case )
lowercase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Dict = '''single_label_classification'''
else:
lowercase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
lowercase__ : Tuple = CrossEntropyLoss()
lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Any = BCEWithLogitsLoss()
lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
| 302
| 0
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_snake_case : List[str] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_snake_case : List[str] = "zero2"
_snake_case : Any = "zero3"
_snake_case : Dict = [ZEROa, ZEROa]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__snake_case : Optional[Any] = parameterized.to_safe_name("_".join(str(__lowerCamelCase ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
_snake_case : Union[str, Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
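# For reference, on a 2-GPU machine get_launcher(distributed=True) returns:
# ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"]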
| 123
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
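# Illustrative __repr__ output (the uuid varies per conversation; texts are examples):
# Conversation id: 7d3f8c6e-...
# user >> Going to the movies tonight, any suggestions?
# bot >> The Big Lebowski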
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
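if __name__ == "__main__":
    # Minimal usage sketch (illustrative): exercises Conversation and the pipeline
    # via the public `transformers.pipeline` factory; the DialoGPT checkpoint is
    # only one example of a conversational model.
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    conversation = Conversation("What's the best way to learn Python?")
    conversation = chatbot(conversation)            # preprocess -> generate -> postprocess
    print(conversation.generated_responses[-1])     # latest bot reply

    conversation.add_user_input("Any good books?")  # queue the next turn
    conversation = chatbot(conversation)
    print(conversation)                             # __repr__ prints the full transcript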
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
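if __name__ == "__main__":
    # Minimal sketch of what the metric wraps (token lists below are illustrative):
    # nltk's corpus_gleu takes pre-tokenized hypotheses and per-hypothesis lists of
    # references, exactly as `_compute` forwards them.
    hyps = [["the", "cat", "sat", "on", "the", "mat"]]
    refs = [[["the", "cat", "is", "on", "the", "mat"]]]
    print(gleu_score.corpus_gleu(list_of_references=refs, hypotheses=hyps, min_len=1, max_len=4))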
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Round (h, w) to the latent grid: each side // scale_factor**2, rounded up, times scale_factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
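# Worked example (illustrative values): the helper returns the latent grid size used
# by the movq decoder -- each side is divided by scale_factor**2 rounded up, then
# scaled back by scale_factor. With the default scale_factor=8:
#   get_new_h_w(768, 768) == (96, 96)     # 768 / 64 = 12       -> 12 * 8 = 96
#   get_new_h_w(770, 770) == (104, 104)   # ceil(770 / 64) = 13 -> 13 * 8 = 104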
class KandinskyPipeline(DiffusionPipeline):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(
text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , movq=lowerCamelCase , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
if latents is None:
snake_case_ = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
snake_case_ = latents.to(lowerCamelCase )
snake_case_ = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , ) -> Any:
snake_case_ = len(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else 1
# get prompt text embeddings
snake_case_ = self.tokenizer(
lowerCamelCase , padding="""max_length""" , truncation=lowerCamelCase , max_length=77 , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="""pt""" , )
snake_case_ = text_inputs.input_ids
snake_case_ = self.tokenizer(lowerCamelCase , padding="""longest""" , return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase , lowerCamelCase ):
snake_case_ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
snake_case_ = text_input_ids.to(lowerCamelCase )
snake_case_ = text_inputs.attention_mask.to(lowerCamelCase )
snake_case_ , snake_case_ = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
snake_case_ = prompt_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = text_encoder_hidden_states.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = text_mask.repeat_interleave(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
snake_case_ = 42
if negative_prompt is None:
snake_case_ = [""""""] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='''
f''' {type(lowerCamelCase )}.''' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
snake_case_ = negative_prompt
snake_case_ = self.tokenizer(
lowerCamelCase , padding="""max_length""" , max_length=77 , truncation=lowerCamelCase , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="""pt""" , )
snake_case_ = uncond_input.input_ids.to(lowerCamelCase )
snake_case_ = uncond_input.attention_mask.to(lowerCamelCase )
snake_case_ , snake_case_ = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = negative_prompt_embeds.shape[1]
snake_case_ = negative_prompt_embeds.repeat(1 , lowerCamelCase )
snake_case_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase )
snake_case_ = uncond_text_encoder_hidden_states.shape[1]
snake_case_ = uncond_text_encoder_hidden_states.repeat(1 , lowerCamelCase , 1 )
snake_case_ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowerCamelCase , -1 )
snake_case_ = uncond_text_mask.repeat_interleave(lowerCamelCase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
snake_case_ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
snake_case_ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase_ ( self , lowerCamelCase=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case_ = torch.device(f'''cuda:{gpu_id}''' )
snake_case_ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self , lowerCamelCase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
snake_case_ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case_ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
snake_case_ , snake_case_ = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase )
if self.safety_checker is not None:
snake_case_ , snake_case_ = cpu_offload_with_hook(self.safety_checker , lowerCamelCase , prev_module_hook=lowerCamelCase )
# We'll offload the last model manually.
snake_case_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self ) -> List[Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 100 , lowerCamelCase = 4.0 , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ) -> Union[str, Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = len(lowerCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}''' )
snake_case_ = self._execution_device
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ , snake_case_ , snake_case_ = self._encode_prompt(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = torch.cat(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
snake_case_ = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
snake_case_ = self.scheduler.timesteps
snake_case_ = self.unet.config.in_channels
snake_case_ , snake_case_ = get_new_h_w(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
snake_case_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
snake_case_ = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
snake_case_ , snake_case_ = noise_pred.chunk(2 )
snake_case_ , snake_case_ = variance_pred.chunk(2 )
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , ).prev_sample
# post-processing
snake_case_ = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
snake_case_ = image * 0.5 + 0.5
snake_case_ = image.clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack a list of same-shape, same-dtype tensors into one tensor.
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
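if __name__ == "__main__":
    # Minimal usage sketch (assumes the public `datasets` API, which routes
    # Dataset.with_format("torch") through the formatter above).
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")
    batch = ds[:2]
    print(batch["x"].dtype, batch["y"].dtype)  # torch.float32 torch.int64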
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, seeded by x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant update: follow the chord through (x_n, f(x_n)) and (x_n1, f(x_n1)).
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
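    # Sanity note: the iteration converges to the real root of x**3 - 2*x - 5,
    # so the line above prints approximately 2.0945514815423474.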
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra parameters controlling weight pruning (masked BERT)."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK",
                 mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
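# Minimal usage sketch: the config round-trips like any PretrainedConfig (the
# directory name is illustrative, and the pruning method string is stored verbatim).
#
# config = MaskedBertConfig(num_hidden_layers=6, pruning_method="magnitude")
# config.save_pretrained("./masked-bert-tiny")  # writes config.json
# assert MaskedBertConfig.from_pretrained("./masked-bert-tiny").pruning_method == "magnitude"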
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
def __init__( self : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None ) ->Optional[Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Dict = speaker_embeddings
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]="speaker_embeddings_path.json" , **lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase : List[str] = get_file_from_repo(
lowerCamelCase__ , lowerCamelCase__ , subfolder=kwargs.pop("subfolder" , lowerCamelCase__ ) , cache_dir=kwargs.pop("cache_dir" , lowerCamelCase__ ) , force_download=kwargs.pop("force_download" , lowerCamelCase__ ) , proxies=kwargs.pop("proxies" , lowerCamelCase__ ) , resume_download=kwargs.pop("resume_download" , lowerCamelCase__ ) , local_files_only=kwargs.pop("local_files_only" , lowerCamelCase__ ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCamelCase__ ) , revision=kwargs.pop("revision" , lowerCamelCase__ ) , )
if speaker_embeddings_path is None:
                logger.warning(
                    F"""`{os.path.join(lowerCamelCase__ , lowerCamelCase__ )}` does not exist, no preloaded speaker
                    embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase : int = None
else:
with open(lowerCamelCase__ ) as speaker_embeddings_json:
_UpperCAmelCase : Optional[Any] = json.load(lowerCamelCase__ )
else:
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
return cls(tokenizer=lowerCamelCase__ , speaker_embeddings=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]="speaker_embeddings_path.json" , lowerCamelCase__ : Tuple="speaker_embeddings" , lowerCamelCase__ : bool = False , **lowerCamelCase__ : str , ) ->Union[str, Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCamelCase__ , lowerCamelCase__ , "v2" ) , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase : Optional[int] = self._load_voice_preset(lowerCamelCase__ )
_UpperCAmelCase : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , lowerCamelCase__ , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=lowerCamelCase__ , )
_UpperCAmelCase : int = os.path.join(lowerCamelCase__ , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase : Optional[int] = tmp_dict
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as fp:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
super().save_pretrained(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str = None , **lowerCamelCase__ : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.speaker_embeddings[voice_preset]
_UpperCAmelCase : Union[str, Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase : List[str] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , lowerCamelCase__ ) , cache_dir=kwargs.pop("cache_dir" , lowerCamelCase__ ) , force_download=kwargs.pop("force_download" , lowerCamelCase__ ) , proxies=kwargs.pop("proxies" , lowerCamelCase__ ) , resume_download=kwargs.pop("resume_download" , lowerCamelCase__ ) , local_files_only=kwargs.pop("local_files_only" , lowerCamelCase__ ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCamelCase__ ) , revision=kwargs.pop("revision" , lowerCamelCase__ ) , )
if path is None:
            raise ValueError(
                F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist,
                no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                embeddings.""" )
_UpperCAmelCase : Optional[Any] = np.load(lowerCamelCase__ )
return voice_preset_dict
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[dict] = None ) ->Any:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : int , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Dict="pt" , lowerCamelCase__ : List[Any]=2_56 , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=False , **lowerCamelCase__ : Tuple , ) ->Tuple:
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if (
isinstance(lowerCamelCase__ , lowerCamelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase : Tuple = self._load_voice_preset(lowerCamelCase__ )
else:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not voice_preset.endswith(".npz" ):
_UpperCAmelCase : List[str] = voice_preset + ".npz"
_UpperCAmelCase : List[Any] = np.load(lowerCamelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
_UpperCAmelCase : int = self.tokenizer(
lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding="max_length" , max_length=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
if voice_preset is not None:
_UpperCAmelCase : int = voice_preset
return encoded_text
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # Rebuild the backend normalizer if its serialized state disagrees with the
        # arguments this tokenizer was constructed with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
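if __name__ == "__main__":
    # Minimal usage sketch: load one of the checkpoints mapped above and encode a
    # sentence; special tokens come from build_inputs_with_special_tokens.
    tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    ids = tok("Hello world!")["input_ids"]
    print(tok.convert_ids_to_tokens(ids))  # ['[CLS]', 'hello', 'world', '!', '[SEP]']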
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
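if __name__ == "__main__":
    # Usage sketch for two of the helpers re-exported above: `set_seed` seeds the
    # python, numpy and torch RNGs in one call, and `patch_environment` scopes
    # environment variables to a block.
    set_seed(42)
    with patch_environment(CUDA_VISIBLE_DEVICES="0"):
        pass  # the variable is set only inside this context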
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence by deciding, element by element, to skip or include it."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)  # exclude sequence[index]
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)  # include sequence[index]
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
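    # Sanity check (illustrative): an n-element sequence yields 2**n subsequences,
    # one printed per line, so the 4-element demo above prints 16 lines.
    import contextlib
    import io

    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        generate_all_subsequences([3, 1, 2, 4])
    assert len(buf.getvalue().splitlines()) == 2**4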
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell, treating 1-cells as blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
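    # Worked example (illustrative): 1 marks a blocked cell; in this grid there are
    # exactly two simple paths from the top-left to the bottom-right corner.
    grid = [
        [0, 0, 0, 0],
        [1, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 1, 0, 0],
    ]
    print(depth_first_search(grid, 0, 0, set()))  # 2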
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
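
# For reference, a minimal sketch of the channel-rounding helper used in the model
# tester above (`make_divisible(512 * width_multiplier, divisor=8)`). This standalone
# version follows the common MobileNet-style convention and is an assumption, not
# necessarily the exact transformers implementation:
def make_divisible_sketch(value: float, divisor: int = 8, min_value: int = None) -> int:
    # Round `value` to the nearest multiple of `divisor`, never dropping below
    # 90% of the original value.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

# e.g. with width_multiplier=0.25 as in the tester: make_divisible_sketch(512 * 0.25) -> 128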
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
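
# The `get_dummy_inputs` helper above shows a common diffusers testing idiom: MPS
# devices do not take a device-bound torch.Generator, so the seed falls back to the
# global RNG. A minimal standalone sketch of that pattern (function name illustrative):
import torch

def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global seeding on MPS
    return torch.Generator(device=device).manual_seed(seed)  # per-device generator elsewhere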
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
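
# A quick usage sketch for the config above; the keyword names and defaults are
# exactly those defined in `BertAbsConfig.__init__`:
config = BertAbsConfig(dec_layers=8, dec_dropout=0.1)
assert config.dec_layers == 8
assert config.enc_hidden_size == 512  # unchanged default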
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
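
# The integration test compares images with `assert_mean_pixel_difference`. A minimal
# sketch of that style of check (the exact threshold semantics of the diffusers helper
# may differ; this version is an illustrative assumption):
import numpy as np

def mean_pixel_difference_ok(image: np.ndarray, reference: np.ndarray, threshold: float = 10.0) -> bool:
    # average absolute per-pixel error between two images of the same shape
    image = image.astype(np.float32)
    reference = reference.astype(np.float32)
    return float(np.abs(image - reference).mean()) < threshold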
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class SegmentationImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
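
# A short usage sketch of the processor above ("example.jpg" is a placeholder path;
# `outputs` would come from a semantic-segmentation model exposing `.logits`):
from PIL import Image

processor = SegmentationImageProcessor()
image = Image.open("example.jpg")

batch = processor.preprocess(image, return_tensors="pt")
# shortest-edge resize to 256 + center crop -> pixel_values of shape (1, 3, 224, 224)

# maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
# maps[0] is an (height, width) tensor of per-pixel class indices.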
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
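
# For illustration, how the pattern-based renaming behaves on a typical TF key
# (the key below is hypothetical; each (tf_name, hf_name) pair is applied in order
# as a plain substring replace):
#
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"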
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def UpperCamelCase__ ( self , __lowercase) -> int:
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__UpperCamelCase :int = max(len(tokenizer.encode(__lowercase)) for a in ARTICLES)
__UpperCamelCase :Dict = max(len(tokenizer.encode(__lowercase)) for a in SUMMARIES)
__UpperCamelCase :Union[str, Any] = 4
__UpperCamelCase :List[str] = LegacySeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=20 , max_target_length=__lowercase , )
__UpperCamelCase :Dict = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
__UpperCamelCase :Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
__UpperCamelCase :str = tmp_dir.joinpath('''train.source''').open().readlines()
__UpperCamelCase :int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(__lowercase , __lowercase , 128 , __lowercase)
__UpperCamelCase :Union[str, Any] = {x.name for x in tmp_dir.iterdir()}
__UpperCamelCase :int = {x.name for x in save_dir.iterdir()}
__UpperCamelCase :Optional[int] = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__lowercase) < len(__lowercase)
assert len(__lowercase) == 1
assert len(packed_examples[0]) == sum(len(__lowercase) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def UpperCamelCase__ ( self) -> List[Any]:
if not FAIRSEQ_AVAILABLE:
return
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = self._get_dataset(max_len=64)
__UpperCamelCase :Union[str, Any] = 64
__UpperCamelCase :Tuple = ds.make_dynamic_sampler(__lowercase , required_batch_size_multiple=__lowercase)
__UpperCamelCase :List[str] = [len(__lowercase) for x in batch_sampler]
assert len(set(__lowercase)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__lowercase) == len(__lowercase) # no dropped or added examples
__UpperCamelCase :int = DataLoader(__lowercase , batch_sampler=__lowercase , collate_fn=ds.collate_fn , num_workers=2)
__UpperCamelCase :List[str] = []
__UpperCamelCase :int = []
for batch in data_loader:
__UpperCamelCase :List[Any] = batch['''input_ids'''].shape
__UpperCamelCase :Dict = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__UpperCamelCase :Optional[int] = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(__lowercase)
if num_src_tokens > (max_tokens * 1.1):
failures.append(__lowercase)
assert num_src_per_batch[0] == max(__lowercase)
if failures:
raise AssertionError(f"""too many tokens in {len(__lowercase)} batches""")
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = self._get_dataset(max_len=512)
__UpperCamelCase :Any = 2
__UpperCamelCase :List[Any] = ds.make_sortish_sampler(__lowercase , shuffle=__lowercase)
__UpperCamelCase :List[Any] = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2)
__UpperCamelCase :Tuple = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowercase)
__UpperCamelCase :int = tokenizer.pad_token_id
def count_pad_tokens(__lowercase , __lowercase="input_ids"):
return [batch[k].eq(__lowercase).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__lowercase , k='''labels''')) < sum(count_pad_tokens(__lowercase , k='''labels'''))
assert sum(count_pad_tokens(__lowercase)) < sum(count_pad_tokens(__lowercase))
assert len(__lowercase) == len(__lowercase)
def UpperCamelCase__ ( self , __lowercase=1_000 , __lowercase=128) -> List[Any]:
if os.getenv('''USE_REAL_DATA''' , __lowercase):
__UpperCamelCase :Optional[Any] = '''examples/seq2seq/wmt_en_ro'''
__UpperCamelCase :Dict = max_len * 2 * 64
if not Path(__lowercase).joinpath('''train.len''').exists():
save_len_file(__lowercase , __lowercase)
else:
__UpperCamelCase :Union[str, Any] = '''examples/seq2seq/test_data/wmt_en_ro'''
__UpperCamelCase :Optional[int] = max_len * 4
save_len_file(__lowercase , __lowercase)
__UpperCamelCase :str = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :List[Any] = SeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=__lowercase , max_target_length=__lowercase , n_obs=__lowercase , )
return ds, max_tokens, tokenizer
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = self._get_dataset()
__UpperCamelCase :List[str] = set(DistributedSortishSampler(__lowercase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowercase))
__UpperCamelCase :Tuple = set(DistributedSortishSampler(__lowercase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowercase))
assert idsa.intersection(__lowercase) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase)
if tok_name == MBART_TINY:
__UpperCamelCase :Optional[Any] = SeqaSeqDataset(
__lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__UpperCamelCase :Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__UpperCamelCase :Tuple = SeqaSeqDataset(
__lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__UpperCamelCase :Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__lowercase) == 1 if tok_name == BART_TINY else len(__lowercase) == 0
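
# The `pack_data_dir` test above exercises greedy example packing: consecutive source
# lines are merged until a token budget is reached, shrinking the number of (longer)
# training examples. A minimal sketch of the idea (hypothetical helper, not the
# project's exact implementation):
def pack_examples(examples, tokenizer, max_tokens: int = 128):
    packed, current = [], ""
    for ex in examples:
        candidate = f"{current} {ex}".strip() if current else ex
        if len(tokenizer.encode(candidate)) <= max_tokens:
            current = candidate  # still fits: keep merging
        else:
            if current:
                packed.append(current)
            current = ex  # start a new packed example
    if current:
        packed.append(current)
    return packed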
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
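
# The forward pass above mean-pools token embeddings under the attention mask before
# the linear projection. A standalone sketch of that pooling step (shapes illustrative):
import torch

embs = torch.randn(2, 7, 1024)  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1]], dtype=torch.float)

# zero out padded positions, then average over real tokens only
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 1024)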
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
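
# LED's tokenizer extends `pad` to the extra `global_attention_mask` key, filling the
# padded positions with -1 ("local attention only"), which is exactly what
# `test_global_attention_mask` checks above. A minimal usage sketch:
from transformers import LEDTokenizerFast

led_tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = led_tokenizer(["short text", "a slightly longer text here"], padding=False)

# mark the first token of each sequence for global attention
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]

padded = led_tokenizer.pad(enc)
# all rows of padded["global_attention_mask"] now share one length, padded with -1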
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train and eval `DataLoader`s for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 371
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
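# Note (added): `defaults` in `namedtuple` bind to the rightmost fields, so only
# `dataset` is required here and every flag above defaults to None/False, e.g.:
assert _TestCommandArgs(dataset="path/to/script").save_infos is False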
def is_apercent_close(source, target):
    """Checks that `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """Runs `datasets-cli test` on a dataset script and checks the generated README infos."""
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2_351_563,
                        "num_examples": 10_000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238_418,
                        "num_examples": 1_000,
                    },
                ],
                download_size=3_940_680,
                dataset_size=2_589_981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 244
| 0
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the value of the Möbius function μ(number)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
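    # Added illustration: μ(n) for n = 1..10; zeros mark numbers with a squared
    # prime factor, -1/1 track an odd/even count of distinct prime factors.
    print([mobius(n) for n in range(1, 11)])  # [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]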
| 77
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
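

# --- Added usage sketch (not part of the test suite) ----------------------------
# Drives the scheduler the same way the loops above do, with `scale_model_input`
# output standing in for a UNet call. Requires torch + torchsde; the tensor shape
# is arbitrary.
def _sde_smoke_test():
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = scheduler.scale_model_input(sample, t)  # stand-in "model" call
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample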
| 237
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
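

if __name__ == "__main__":
    # Added usage sketch: the builder above is what `load_dataset` dispatches to
    # for the "pandas" packaged module. The .pkl path below is illustrative, not
    # a file shipped with the library.
    from datasets import load_dataset

    ds = load_dataset("pandas", data_files={"train": "frames/train.pkl"})
    print(ds)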
| 362
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serializes this instance while replacing any `GenerationConfig` entry by its own dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
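

if __name__ == "__main__":
    # Added usage sketch (output_dir is illustrative): the generation knobs ride
    # along with the usual TrainingArguments fields and survive `to_dict()`.
    args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
    print(args.to_dict()["generation_num_beams"])  # -> 4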
| 264
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22
|
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), optionally smoothed as 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
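

if __name__ == "__main__":
    # Added worked example (the corpus is illustrative): "coffee" occurs 3 times
    # in the document and in 2 of the 3 corpus lines, so idf = round(log10(3 / 2), 3)
    # = 0.176 and tf-idf = round(3 * 0.176, 3) = 0.528.
    tf = term_frequency("coffee", "I love coffee. Coffee is great. coffee!")
    df, n = document_frequency("coffee", "I love coffee\nTea is fine\nCoffee again")
    idf = inverse_document_frequency(df, n)
    print(tf, (df, n), idf, tf_idf(tf, idf))  # 3 (2, 3) 0.176 0.528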
| 22
| 1
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # added guard so pure-numpy inputs also work
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
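

def _slerp_sanity_check():
    # Added sanity check (not from the original file): slerp of two orthogonal
    # unit vectors at t=0.5 lands on the 45-degree unit vector.
    v = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
    assert np.allclose(v, [0.7071, 0.7071], atol=1e-3)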
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_prompt: Optional[str] = None,
        style_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
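

# --- Added usage sketch (illustrative only; running it needs large checkpoints) --
# The pipeline above is meant to be loaded as a diffusers community pipeline and
# fed open-clip CoCa objects; all names below are assumptions, not verified here:
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#       clip_model=clip_model, feature_extractor=feature_extractor,
#       coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
#   )
#   out = pipe(content_image=content, style_image=style, num_inference_steps=50)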
| 128
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
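

if __name__ == "__main__":
    # Added usage sketch: "Salesforce/blip2-opt-2.7b" is a public checkpoint, but
    # running this downloads it; the blank image is just a stand-in input.
    from PIL import Image

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo of", return_tensors="pt")
    print(sorted(inputs.keys()))  # pixel_values plus the tokenizer outputs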
| 128
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
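

# Added shape sketch (illustrative only; the dimensions below are assumptions,
# not values from this file). With clip_embeddings_dim=768, time_embed_dim=1536,
# cross_attention_dim=2048, clip_extra_context_tokens=4, a batch of 2 and
# do_classifier_free_guidance=False, the module maps
#   image_embeddings           (2, 768)
#   prompt_embeds              (2, 768)
#   text_encoder_hidden_states (2, 77, 768)
# to hidden states of shape (2, 77 + 4, 2048) plus additive time embeddings
# of shape (2, 1536).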
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        """
        Return the pair of numbers that represents the given letter in the
        Polybius square.
        """
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        """
        Return the letter corresponding to the position [index1, index2] in
        the Polybius square.
        """
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        """
        Return the encoded version of the message: the row/column coordinates
        are written in two rows, then read off pairwise row by row.
        """
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message):
        """
        Return the decoded version of the message (the inverse of the
        transposition performed by `encode`).
        """
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
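
# Added note: as the cases above encode, the helper answers one question --
# does every torch ".bin" weight in the listing have a ".safetensors"
# counterpart (of the requested variant, when one is given)? A lone
# "unet/diffusion_pytorch_model.bin" entry would therefore be incompatible.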
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects ordered by `val`, with O(log n) decrease-key.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
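
    # Added demonstration (not in the original): remove() drains the nodes in
    # ascending order of value, reflecting the decrease_key call above.
    while not my_min_heap.is_empty():
        print(my_min_heap.remove())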
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Returns the 1-based line number of the base,exponent pair in `data_file`
    with the greatest value of base**exponent, compared via exponent * log10(base).
    """
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
if __name__ == "__main__":
print(solution())
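
    # Added illustration: the logarithm comparison agrees with comparing the
    # powers directly on the small pair from the problem statement
    # (2**11 = 2048 vs 3**7 = 2187).
    assert (11 * log10(2) < 7 * log10(3)) == (2**11 < 3**7)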
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
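
# Added usage sketch (the model name and values below are placeholders, and the
# exact flag names should be checked against TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128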
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements the tanh activation function: tanh(x) = (2 / (1 + e^(-2x))) - 1
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
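
    # Added check (not in the original): the closed form above is algebraically
    # identical to numpy's built-in tanh, since tanh(x) = 2 / (1 + e^(-2x)) - 1.
    _v = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(_v), np.tanh(_v))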
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads the given file as bytes and returns them as one long string of bits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """
    Replaces curr_string in the lexicon with its two one-bit extensions,
    widening every code by one bit whenever the index crosses a power of two.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses the given bit string with a Lempel-Ziv-style dictionary scheme
    and returns the resulting code stream as a string of bits.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Prefixes the compressed stream with the original file length, encoded in a
    self-delimiting unary/binary form.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given bit string (consisting only of '0' and '1') as padded
    bytes to the given file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, compresses it and writes the compressed result to
    the destination file.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
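
# Added usage sketch (file names are placeholders):
#   python lempel_ziv.py source_file destination_file
# reads source_file, compresses its bit stream with the dictionary scheme
# above, prefixes the original file length, and writes the result to
# destination_file.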
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: returns the hidden states of the base
    transformer, which can be used as features in downstream tasks.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
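

# Added usage sketch (kept as a comment to avoid a circular import here; the
# checkpoint name is only an example):
#
#   from transformers import pipeline
#
#   extractor = pipeline(task="feature-extraction", model="bert-base-cased")
#   features = extractor("This is a simple test.", return_tensors=True)
#   # features has shape (1, sequence_length, hidden_size)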
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """
    Linked-list based stack (LIFO).
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = pop_node.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
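
    # Added usage sketch (not in the original module): basic LIFO behavior.
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2
    assert len(stack) == 2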
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """
    Compare array[index1] and array[index2]; swap them if they are out of
    order with respect to `direction` (1 = ascending, 0 = descending).
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """
    Recursively merge the bitonic sequence of `length` elements starting at
    `low` into a monotonic sequence in the given direction.
    """
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """
    Sort `length` elements (a power of two) starting at `low`: build a bitonic
    sequence from two oppositely sorted halves, then merge it.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_input_mask:
snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Optional[int] = None
if self.use_token_type_ids:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Union[str, Any] = None
snake_case : List[Any] = None
snake_case : int = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[int] = AlbertModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : List[str] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
snake_case : str = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
snake_case : int = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case : Optional[Any] = AlbertForPreTraining(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : List[Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , sentence_order_label=UpperCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : str = AlbertForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
snake_case : int = AlbertForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : str = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[int] = self.num_labels
snake_case : List[str] = AlbertForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
snake_case : Tuple = self.num_labels
snake_case : int = AlbertForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
snake_case : int = self.num_choices
snake_case : List[Any] = AlbertForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Union[str, Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
snake_case : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = True
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int=False ) -> Optional[Any]:
"""simple docstring"""
snake_case : Any = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
snake_case : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ )
snake_case : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Any = AlbertModelTester(self )
snake_case : Any = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : int = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[Any] = AlbertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Tuple = AlbertModel.from_pretrained('''albert-base-v2''' )
snake_case : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
snake_case : int = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase__ )
snake_case : Any = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4 ) )
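# Editorial note: the assertion above deliberately compares only a 3x3 slice of
# the hidden states (batch 0, positions 1-3, features 1-3) against hard-coded
# reference values at atol=1e-4 -- enough signal to catch a broken checkpoint
# without embedding the full (1, 11, 768) tensor in the test.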
| 83
| 0
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "BlipImageProcessor"
UpperCamelCase__ = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_UpperCAmelCase = BatchFeature()
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
encoding.update(UpperCAmelCase )
_UpperCAmelCase = self.qformer_tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
_UpperCAmelCase = qformer_text_encoding.pop('input_ids' )
_UpperCAmelCase = qformer_text_encoding.pop('attention_mask' )
if images is not None:
_UpperCAmelCase = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
encoding.update(UpperCAmelCase )
return encoding
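    # Editorial note: in the upstream InstructBLIP processor the two tensors
    # popped above are re-inserted as encoding["qformer_input_ids"] and
    # encoding["qformer_attention_mask"] (the mangled `_UpperCAmelCase =`
    # assignments stand in for those key writes), so the returned BatchFeature
    # carries three aligned views: language-model tokens, Q-Former tokens and,
    # when images are given, pixel_values.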
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        """simple docstring"""
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 39
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "distilbert"
UpperCamelCase__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=512 , UpperCAmelCase=False , UpperCAmelCase=6 , UpperCAmelCase=12 , UpperCAmelCase=768 , UpperCAmelCase=4 * 768 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=0.1 , UpperCAmelCase=0.2 , UpperCAmelCase=0 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = sinusoidal_pos_embds
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dim
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation
_UpperCAmelCase = initializer_range
_UpperCAmelCase = qa_dropout
_UpperCAmelCase = seq_classif_dropout
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
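# Editorial illustration (added; not part of the original file): for any task
# other than multiple-choice, the property above resolves to the mapping below,
# marking both axes of each input as dynamic so the exported ONNX graph accepts
# any batch size and sequence length.
#
# OrderedDict(
#     [
#         ('input_ids', {0: 'batch', 1: 'sequence'}),
#         ('attention_mask', {0: 'batch', 1: 'sequence'}),
#     ]
# )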
| 39
| 1
|
from timeit import timeit
_lowerCamelCase : Any = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Any = len(UpperCamelCase_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : int = len(UpperCamelCase_ ) // 2
_lowerCAmelCase : Optional[Any] = len(UpperCamelCase_ )
    # We only need to traverse half of the string: the i-th character
    # from the end is reachable from index i, since it pairs with
    # index n - i - 1, where n is the length of the string.
    # e.g. for [0,1,2,3,4,5], index 4 is checked together with index 1
    # (n - i - 1 == 4 when i == 1).
return all(s[i] == s[n - i - 1] for i in range(UpperCamelCase_ ) )
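# Small demo added for clarity (not part of the original benchmark): the index
# pairs the half-length traversal above actually compares.
def _index_pairs_demo(n: int = 6) -> list[tuple[int, int]]:
    # for n == 6 this returns [(0, 5), (1, 4), (2, 3)]
    return [(i, n - i - 1) for i in range(n // 2)]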
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
if len(UpperCamelCase_ ) <= 2:
return True
if s[0] == s[len(UpperCamelCase_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
return s == s[::-1]
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"all({name}(key) is value for key, value in test_data.items())"
_lowerCAmelCase : Tuple = F"from __main__ import test_data, {name}"
_lowerCAmelCase : Dict = 500000
_lowerCAmelCase : Optional[Any] = timeit(stmt=UpperCamelCase_ , setup=UpperCamelCase_ , number=UpperCamelCase_ )
print(F"{name:<35} finished {number:,} runs in {result:.5f} seconds" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 355
|
_lowerCamelCase : List[Any] = tuple[float, float, float]
_lowerCamelCase : Tuple = tuple[float, float, float]
def _UpperCAmelCase (UpperCamelCase_ : Pointad , UpperCamelCase_ : Pointad ):
'''simple docstring'''
_lowerCAmelCase : Tuple = end_pointa[0] - end_pointa[0]
_lowerCAmelCase : str = end_pointa[1] - end_pointa[1]
_lowerCAmelCase : List[Any] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _UpperCAmelCase (UpperCamelCase_ : Vectorad , UpperCamelCase_ : Vectorad ):
'''simple docstring'''
_lowerCAmelCase : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i
_lowerCAmelCase : int = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
_lowerCAmelCase : List[Any] = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _UpperCAmelCase (UpperCamelCase_ : Vectorad , UpperCamelCase_ : int ):
'''simple docstring'''
    return tuple(round(x , UpperCamelCase_ ) for x in vector ) == (0, 0, 0)
def _UpperCAmelCase (UpperCamelCase_ : Pointad , UpperCamelCase_ : Pointad , UpperCamelCase_ : Pointad , UpperCamelCase_ : int = 10 ):
'''simple docstring'''
_lowerCAmelCase : Any = create_vector(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = create_vector(UpperCamelCase_ , UpperCamelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
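# Worked example (illustrative values, added for clarity): plugging
# ab = (1, 0, 0) and ac = (0, 1, 0) into the i/j/k expansion above gives
#   x = 0*0 - 0*1 = 0,   y = -(1*0 - 0*0) = 0,   z = 1*1 - 0*0 = 1,
# i.e. the cross product of x-hat and y-hat is the +z unit vector; the cross
# product only rounds to (0, 0, 0) when the two input vectors are parallel.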
| 159
| 0
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def __lowercase ( __lowercase ) -> Dict:
'''simple docstring'''
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def __lowercase ( __lowercase ) -> List[Any]:
'''simple docstring'''
_A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
_A = s_dict.pop(__lowercase )
elif "subsample" in key:
_A = s_dict.pop(__lowercase )
def __lowercase ( __lowercase ) -> Tuple:
'''simple docstring'''
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
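# Editorial note: after the .data assignment the layer's weight *is* the
# (vocab_size, d_model) embedding matrix, so the returned module projects
# decoder hidden states back onto the vocabulary -- the standard way of tying
# the output projection to the input embeddings.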
def __lowercase ( __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
_A = torch.load(__lowercase , map_location="cpu" )
_A = mam_aaa["args"]
_A = mam_aaa["model"]
_A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(__lowercase )
rename_keys(__lowercase )
_A = state_dict["decoder.embed_tokens.weight"].shape[0]
_A = args.share_decoder_input_output_embed
_A = [int(__lowercase ) for i in args.conv_kernel_sizes.split("," )]
    _A = Speech2TextConfig(
        vocab_size=__lowercase ,
        max_source_positions=args.max_source_positions ,
        max_target_positions=args.max_target_positions ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function="relu" ,
        num_conv_layers=len(__lowercase ) ,
        conv_channels=args.conv_channels ,
        conv_kernel_sizes=__lowercase ,
        input_feat_per_channel=args.input_feat_per_channel ,
        input_channels=args.input_channels ,
        tie_word_embeddings=__lowercase ,
        num_beams=5 ,
        max_length=200 ,
        use_cache=__lowercase ,
        decoder_start_token_id=2 ,
        early_stopping=__lowercase ,
    )
    _A = Speech2TextForConditionalGeneration(__lowercase )
_A , _A = model.model.load_state_dict(__lowercase , strict=__lowercase )
if len(__lowercase ) > 0 and not set(__lowercase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
_A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_A = lm_head_weights
model.save_pretrained(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase_ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 79
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_: Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ""
if is_panoptic:
UpperCAmelCase_ = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[:256, :]
        UpperCAmelCase_ = in_proj_bias[:256]
        UpperCAmelCase_ = in_proj_weight[256:512, :]
        UpperCAmelCase_ = in_proj_bias[256:512]
        UpperCAmelCase_ = in_proj_weight[-256:, :]
        UpperCAmelCase_ = in_proj_bias[-256:]
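# Editorial note: the slices above encode conditional DETR's hidden size of 256;
# PyTorch's MultiheadAttention stores q, k and v stacked in a single
# (3 * 256, 256) in_proj matrix, so rows [:256], [256:512] and [-256:] are the
# query, key and value projections respectively.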
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase_ = "resnet101"
if "dc5" in model_name:
UpperCAmelCase_ = True
UpperCAmelCase_ = "panoptic" in model_name
if is_panoptic:
UpperCAmelCase_ = 2_50
else:
UpperCAmelCase_ = 91
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "coco-detection-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
# prepare image
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
UpperCAmelCase_ = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
UpperCAmelCase_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase_ = "conditional_detr." + src
rename_key(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCAmelCase_ = conditional_detr(snake_case_ )
UpperCAmelCase_ = model(snake_case_ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1
| 0
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__snake_case = logging.getLogger(__name__)
@dataclass(frozen=_UpperCAmelCase )
class lowercase__ :
A__ : str
A__ : str
A__ : Optional[str] =None
A__ : Optional[str] =None
A__ : Optional[str] =None
@dataclass(frozen=_UpperCAmelCase )
class lowercase__ :
A__ : List[int]
A__ : Optional[List[int]] =None
A__ : Optional[List[int]] =None
A__ : Optional[Union[int, float]] =None
A__ : Optional[int] =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowercase__ ( _UpperCAmelCase ):
A__ : List[InputFeatures]
def __init__( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str=False , UpperCAmelCase_ : bool = False , ):
SCREAMING_SNAKE_CASE__ = hans_processors[task]()
SCREAMING_SNAKE_CASE__ = os.path.join(
UpperCAmelCase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCAmelCase_ ) , UpperCAmelCase_ , ) , )
SCREAMING_SNAKE_CASE__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_ ):
if os.path.exists(UpperCAmelCase_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE__ = torch.load(UpperCAmelCase_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE__ = (
processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ )
)
logger.info('Training examples: %s' , len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
logger.info('Saving features into cached file %s' , UpperCAmelCase_ )
torch.save(self.features , UpperCAmelCase_ )
def __len__( self : str ):
return len(self.features )
def __getitem__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] ):
return self.features[i]
def A_ ( self : Any ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowercase__ :
A__ : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = 128 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : bool = False , ):
SCREAMING_SNAKE_CASE__ = hans_processors[task]()
SCREAMING_SNAKE_CASE__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE__ = label_list
SCREAMING_SNAKE_CASE__ = processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCAmelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE__ = tf.data.Dataset.from_generator(
UpperCAmelCase_ , (
{
                    'example_id': tf.int32,
                    'input_ids': tf.int32,
                    'attention_mask': tf.int32,
                    'token_type_ids': tf.int32,
},
                tf.int64,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A_ ( self : str ):
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
return self.features[i]
def A_ ( self : Union[str, Any] ):
return self.label_list
class lowercase__ ( _UpperCAmelCase ):
def A_ ( self : Optional[Any] , UpperCAmelCase_ : str ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : Dict ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def A_ ( self : Optional[Any] ):
return ["contradiction", "entailment", "neutral"]
def A_ ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ = []
for i, line in enumerate(UpperCAmelCase_ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE__ = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE__ = line[5]
SCREAMING_SNAKE_CASE__ = line[6]
SCREAMING_SNAKE_CASE__ = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE__ = line[0]
examples.append(InputExample(guid=UpperCAmelCase_ , text_a=UpperCAmelCase_ , text_b=UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
return examples
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {label: i for i, label in enumerate(UpperCamelCase_ )}
SCREAMING_SNAKE_CASE__ = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE__ = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' , truncation=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE__ = int(example.pairID )
features.append(InputFeatures(**UpperCamelCase_ , label=UpperCamelCase_ , pairID=UpperCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
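# Hypothetical usage sketch (names illustrative, added for clarity):
#
#   processor = HansProcessor()
#   examples = processor.get_train_examples("/path/to/hans")
#   features = hans_convert_examples_to_features(
#       examples, processor.get_labels(), max_length, tokenizer
#   )
#
# after which every InputFeatures carries padded input_ids, the mapped label
# and the original pairID for per-heuristic error analysis.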
__snake_case = {
"""hans""": 3,
}
__snake_case = {
"""hans""": HansProcessor,
}
| 368
|
from __future__ import annotations
class lowercase__ :
def __init__( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = text, pattern
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ ), len(UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A_ ( self : Tuple , UpperCAmelCase_ : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A_ ( self : str ):
# searches pattern in text and returns index positions
SCREAMING_SNAKE_CASE__ = []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE__ = self.mismatch_in_text(UpperCAmelCase_ )
if mismatch_index == -1:
positions.append(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE__ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
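# Worked example for the heuristic above, using the demo inputs below: at
# window i == 1 the right-to-left scan mismatches at text index 2 ('A' against
# the pattern's 'B'); the last occurrence of 'A' in "AB" is index 0, so the
# computed shift is 2 - 0 == 2. This implementation still advances the window
# one step at a time and only records the shift, ultimately reporting matches
# of "AB" in "ABAABA" at positions [0, 3].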
__snake_case = """ABAABA"""
__snake_case = """AB"""
__snake_case = BoyerMooreSearch(text, pattern)
__snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 169
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
'''simple docstring'''
lowercase_ = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=__lowerCAmelCase )
lowercase_ = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=__lowerCAmelCase )
env_command_parser(subparsers=__lowerCAmelCase )
launch_command_parser(subparsers=__lowerCAmelCase )
tpu_command_parser(subparsers=__lowerCAmelCase )
test_command_parser(subparsers=__lowerCAmelCase )
# Let's go
lowercase_ = parser.parse_args()
if not hasattr(__lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
args.func(__lowerCAmelCase )
if __name__ == "__main__":
main()
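# Editorial note: each *_command_parser call above registers one subcommand
# (config, env, launch, the TPU helper, test) and attaches its handler through
# set_defaults(func=...), which is why the hasattr(args, "func") check is
# sufficient to detect that no subcommand was supplied.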
| 136
|
"""simple docstring"""
UpperCAmelCase : Optional[Any] = tuple[float, float, float]
UpperCAmelCase : Optional[Any] = tuple[float, float, float]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Vectorad:
'''simple docstring'''
lowercase_ = end_pointa[0] - end_pointa[0]
lowercase_ = end_pointa[1] - end_pointa[1]
lowercase_ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Vectorad:
'''simple docstring'''
lowercase_ = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowercase_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowercase_ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> bool:
'''simple docstring'''
    return tuple(round(x , __lowerCAmelCase ) for x in vector ) == (0, 0, 0)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 10 ) -> bool:
'''simple docstring'''
lowercase_ = create_vector(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = create_vector(__lowerCAmelCase , __lowerCAmelCase )
return is_zero_vector(get_ad_vectors_cross(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
| 136
| 1
|
from __future__ import annotations
import os
from collections.abc import Mapping
UpperCAmelCase__ = tuple[int, int]
class __lowerCAmelCase :
def __init__( self : List[Any] , A : set[int] , A : Mapping[EdgeT, int]) -> None:
"""simple docstring"""
_UpperCAmelCase = vertices
        _UpperCAmelCase = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
def _lowerCamelCase ( self : Tuple , A : EdgeT , A : int) -> None:
"""simple docstring"""
self.vertices.add(edge[0])
self.vertices.add(edge[1])
_UpperCAmelCase = weight
def _lowerCamelCase ( self : Tuple) -> Graph:
"""simple docstring"""
_UpperCAmelCase = Graph({min(self.vertices)} , {})
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
while len(subgraph.vertices) < len(self.vertices):
_UpperCAmelCase = max(self.edges.values()) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_UpperCAmelCase = edge
_UpperCAmelCase = weight
subgraph.add_edge(A , A)
return subgraph
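# Editorial note: prims_algorithm above is Prim's algorithm in its simple
# O(V * E) form -- every pass rescans all edges, and the XOR test keeps exactly
# the edges with one endpoint inside the growing tree, so no priority queue is
# needed for the small Project Euler input.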
def A ( _UpperCAmelCase : str = "p107_network.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.read().strip().split('\n' )
_UpperCAmelCase = [line.split(',' ) for line in data]
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb] )
_UpperCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
_UpperCAmelCase = graph.prims_algorithm()
_UpperCAmelCase = sum(graph.edges.values() )
_UpperCAmelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 290
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPFeatureExtractor"]
UpperCAmelCase__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
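# Editorial note: this is the standard transformers lazy-import module. The
# try/except OptionalDependencyNotAvailable blocks above only extend
# _import_structure (nothing heavy is imported at load time), and _LazyModule
# resolves each name on first attribute access, so torch, TensorFlow or Flax
# are touched only when one of their model classes is actually requested.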
| 290
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """data2vec-text"""
def __init__( self : Union[str, Any] , __lowerCamelCase : str=3_05_22 , __lowerCamelCase : Dict=7_68 , __lowerCamelCase : Dict=12 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : Optional[Any]=30_72 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[str]=5_12 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=1e-12 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Optional[int]="absolute" , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Any , ) -> int:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = position_embedding_type
a = use_cache
a = classifier_dropout
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a = {0: "batch", 1: "choice", 2: "sequence"}
else:
a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 107
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
snake_case_ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__ :
lowercase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowercase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class lowercase__ :
lowercase__ = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
if self.train_file is not None:
_UpperCamelCase : List[Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_UpperCamelCase : Union[str, Any] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads a batch of multiple-choice inputs."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
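
# Shape sketch for the collator above (illustrative batch of 2 examples, 4 choices each):
# the 8 flattened encodings are padded to (8, seq_len), un-flattened to (2, 4, seq_len),
# and "labels" becomes a (2,)-shaped int64 tensor of correct-choice indices.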
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        # fall back to the validation file's extension when no training file is given
        extension = (data_args.train_file or data_args.validation_file).split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
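
# Example invocation (illustrative; the model name, batch size, and output
# directory are placeholders, not values taken from this script):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --per_device_train_batch_size 16 \
#       --output_dir /tmp/swag_output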
| 83
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
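
# Minimal usage sketch (GLPNForDepthEstimation is the matching head class in
# transformers; treat this as illustrative rather than taken from this file):
# config = GLPNConfig(decoder_hidden_size=64, max_depth=10)
# model = GLPNForDepthEstimation(config)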
| 294
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
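
# With `rich` installed, uncaught exceptions are rendered as formatted tracebacks;
# `show_locals=False` keeps local variables out of the traceback output.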
| 294
| 1
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline, usable with any model trained on an NLI task."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
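
# Illustrative usage through the high-level pipeline API (the checkpoint name is
# an assumption; any NLI model with an "entailment" label works):
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier(
#     "I just booked flights to Kyoto for the spring.",
#     candidate_labels=["travel", "cooking", "finance"],
# )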
| 126
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
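
# Illustrative usage of the exported pipeline (model ids are assumptions):
# from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
# controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# pipe = StableDiffusionControlNetPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", controlnet=controlnet
# )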
| 159
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
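
# Example invocation (illustrative; the script filename, model name, and data
# paths are placeholders, not values taken from this file):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#       --output_dir ./t5_summarization --do_train --do_eval --task summarization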
| 351
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model weights."""
    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]
        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
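
# Minimal usage sketch (the model/dataloader/optimizer names are illustrative):
# ema = EMAModel(model.parameters(), decay=0.9999)
# for batch in dataloader:
#     loss = model(**batch).loss
#     loss.backward()
#     optimizer.step()
#     optimizer.zero_grad()
#     ema.step(model.parameters())  # update the shadow weights
# ema.store(model.parameters())    # stash the raw weights
# ema.copy_to(model.parameters())  # evaluate with the EMA weights
# ema.restore(model.parameters())  # put the raw weights back for training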
| 139
| 0
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """Constructs a PhoBERT tokenizer, which uses Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
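
# Illustrative usage (the checkpoint names come from the map above; the sample
# sentence is an assumption):
# tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
# tokenizer.tokenize("Tôi là sinh_viên")  # sub-word pieces carry the "@@" marker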
| 114
|
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform a depth-first topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
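
# For the graph above this prints ['c', 'd', 'e', 'b', 'a']: each vertex appears
# after all of its outgoing neighbors, i.e. dependencies come before dependents.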
| 169
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors )
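    # Minimal usage sketch (an assumption: this class is a CLIP-style image processor
    # and `processor` is an instance of it; PIL is only used to build a demo input):
    #
    #   from PIL import Image
    #   image = Image.new('RGB', (640, 480))
    #   batch = processor.preprocess(image, return_tensors='np')
    #   batch['pixel_values'].shape  # (1, 3, crop_height, crop_width)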
| 365
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
_UpperCamelCase: Any = 'naver-clova-ix/donut-base'
class a__ ( unittest.TestCase ):
    def setUp( self ) -> None:
        # unittest only runs this hook automatically when it is named `setUp`
        self.processor = DonutProcessor.from_pretrained(_UpperCamelCase )
    def test_token2json( self ) -> None:
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json, expected_json )
| 53
| 0
|
"""simple docstring"""
def __a ( a ) ->int:
    if not isinstance(a , int ):
        raise TypeError('Input value must be a \'int\' type' )
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    return bin(a ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
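# Worked example: bin(25) == '0b11001', which contains three '1' characters, so
# __a(25) returns 3 (the population count of 25).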
| 290
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ) ->Optional[int]:
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
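# For example, floats_list((2, 3)) returns a 2x3 nested list of floats drawn
# uniformly from [0.0, scale), using the module-level `global_rng` unless an
# explicit `rng` is supplied.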
class WavaVecaFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=1 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False) -> Tuple:
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp( self ) -> None:
        '''simple docstring'''
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance( self , input_vector ) -> None:
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector , axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0) - 1) < 1e-3))
    def test_call( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 16_00, None]
        for max_length, padding in zip(max_lengths , paddings):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors='np')
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_00])
            self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:10_00])
            self.assertTrue(input_values[1][10_00:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:12_00])
    def test_zero_mean_unit_variance_normalization( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(8_00 , 14_00 , 2_00)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 16_00, None]
        for max_length, padding in zip(max_lengths , paddings):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding)
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_00])
            self._check_zero_mean_unit_variance(input_values[1][:10_00])
            self._check_zero_mean_unit_variance(input_values[2][:12_00])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=10_00 , padding='max_length' , return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_00])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=10_00 , padding='longest' , return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_00])
        self._check_zero_mean_unit_variance(input_values[1, :10_00])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 10_00))
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=20_00 , padding='longest' , return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_00])
        self._check_zero_mean_unit_variance(input_values[1, :10_00])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
    def test_double_precision_pad( self ) -> None:
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(1_00).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
@slow
@require_torch
    def test_pretrained_checkpoints_are_set_correctly( self ) -> None:
        '''simple docstring'''
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class BartphoTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
        with open(self.monolingual_vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            for token in vocab_tokens:
                fp.write(F'{token} {vocab_tokens[token]}\n' )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB ,self.monolingual_vocab_file ,**self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB ,self.monolingual_vocab_file ,**self.special_tokens_map )
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
| 74
|
"""simple docstring"""
import qiskit
def quantum_entanglement ( qubits = 2 ):
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
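# With 3 qubits the circuit prepares the GHZ state (|000> + |111>)/sqrt(2), so over
# 1000 shots the printed counts should be roughly {'000': ~500, '111': ~500}.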
| 74
| 1
|
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'T5Config'
def shift_tokens_right ( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -1_0_0 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
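# Worked example (a sketch): with input_ids = [[5, 6, 7]], pad_token_id = 0 and
# decoder_start_token_id = 1, the row is shifted right to [[1, 5, 6]]; any -100
# (label padding) positions would then be replaced by pad_token_id.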
class FlaxMTaModel ( FlaxTaModel ):
    model_type = '''mt5'''
    config_class = MTaConfig
class FlaxMTaEncoderModel ( FlaxTaEncoderModel ):
    model_type = '''mt5'''
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration ( FlaxTaForConditionalGeneration ):
    model_type = '''mt5'''
    config_class = MTaConfig
| 294
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory ( args : Namespace ):
    return ConvertCommand(
        args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand ( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        '''simple docstring'''
        train_parser = parser.add_parser(
            'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
        train_parser.add_argument('--model_type' , type=str , required=True , help='Model\'s type.' )
        train_parser.add_argument(
            '--tf_checkpoint' , type=str , required=True , help='TensorFlow checkpoint path or folder.' )
        train_parser.add_argument(
            '--pytorch_dump_output' , type=str , required=True , help='Path to the PyTorch saved model output.' )
        train_parser.add_argument('--config' , type=str , default='' , help='Configuration file path or folder.' )
        train_parser.add_argument(
            '--finetuning_task_name' , type=str , default=None , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type : str , tf_checkpoint : str , pytorch_dump_output : str , config : str , finetuning_task_name : str , *args , ) -> None:
        '''simple docstring'''
        self._logger = logging.get_logger('transformers-cli/converting' )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
        '''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
__snake_case : Optional[int] = self._tf_checkpoint
__snake_case : List[str] = ''
else:
__snake_case : int = self._tf_checkpoint
__snake_case : Tuple = ''
convert_transfo_xl_checkpoint_to_pytorch(
__a , self._config , self._pytorch_dump_output , __a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
| 367
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
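    # Sketch of the special-token layout produced by the two methods above
    # (the token ids are illustrative, `tok` is an instance of this class):
    #
    #   tok.build_inputs_with_special_tokens([7, 8], [9])
    #   # -> [cls, 7, 8, sep, 9, sep]
    #   tok.create_token_type_ids_from_sequences([7, 8], [9])
    #   # -> [0, 0, 0, 0, 1, 1]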
| 0
| 0
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings ( monkeypatch ):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
# Used by list_metrics
@pytest.fixture
def mock_hfh ( monkeypatch ):
    class MetricMock:
        def __init__( self ,metric_id ):
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
        def list_metrics( self ):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 139
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A_ = logging.get_logger(__name__)
class GLPNFeatureExtractor ( GLPNImageProcessor ):
    def __init__( self ,*args ,**kwargs ):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 139
| 1
|
from heapq import heappop, heappush
import numpy as np
def dijkstra ( grid , source , destination , allow_diagonal , ):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
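# Minimal usage sketch (free cells are marked 1, since the relaxation above only
# visits neighbours where grid[nx][ny] == 1):
#
#   grid = np.array([[1, 1], [1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (1, 1), allow_diagonal=False)
#   # dist == 2.0 and path == [(0, 0), (0, 1), (1, 1)] (or the symmetric route)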
| 367
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 256
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE : Optional[int] = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102
|
'''simple docstring'''
from __future__ import annotations
class Matrix :
"""simple docstring"""
    def __init__( self , rows : list[list[int]] ):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self , row : int , column : int ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row : int , column : int ):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
    def __repr__( self ):
        return str(self.rows )
    def __str__( self ):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n  ".join(
                [
                    '[' + '. '.join([str(value ) for value in row] ) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )
    def add_row( self , row : list[int] , position : int | None = None ):
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column : list[int] , position : int | None = None ):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other : object ):
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other : object ):
        return not self == other
    def __neg__( self ):
        return self * -1
    def __add__( self , other : Matrix ):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __sub__( self , other : Matrix ):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __mul__( self , other : Matrix | int | float ):
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self , other : int ):
        if not isinstance(other , int ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def dot_product( cls , row : list[int] , column : list[int] ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
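# Quick usage sketch of the class above:
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()  # -2
#   m.inverse()      # adjugate() * (1 / -2); note that __mul__ truncates each
#                    # entry with int(), so non-integer inverses lose precision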
| 53
| 0
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 362
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger('''transformers.models.speecht5''')
def load_weights ( checkpoint , hf_model , config ):
    '''simple docstring'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint ( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 59
| 0
|
"""simple docstring"""
from collections import namedtuple
_lowercase = namedtuple('''from_to''', '''from_ to''')
_lowercase = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00_454, 264.172),
'''cubicyard''': from_to(0.76_455, 1.30_795),
'''cubicfoot''': from_to(0.028, 35.3_147),
'''cup''': from_to(0.000_236_588, 4_226.75),
}
def volume_conversion ( value : float , from_type : str , to_type : str ):
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ', '.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ', '.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
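# Worked examples against the table above:
#   volume_conversion(4, 'cubicmeter', 'litre')  == 4 * 1 * 1000        == 4000.0
#   volume_conversion(1, 'litre', 'gallon')      == 1 * 0.001 * 264.172 == 0.264172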
| 74
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase = 16
_lowercase = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
        performance_metric[F'epoch-{epoch}'] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(performance_metric , f )
def main ():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
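# Example invocation (a sketch; flags depend on your accelerate/DeepSpeed setup, and
# the script filename here is a placeholder):
#   accelerate launch --use_deepspeed train_script.py --model_name_or_path bert-base-cased --num_epochs 3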
| 74
| 1
|
'''simple docstring'''
def mf_knapsack ( i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack ( w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution ( w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution ( dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 338
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
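# Hedged sketch of the lazy-import idea used above (illustrative only; this is
# not the actual transformers._LazyModule implementation). Attribute access
# triggers the real submodule import on first use.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # import the submodule that provides `attr` the first time it is needed
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")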
| 338
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
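# Hedged usage sketch of the public API assembled above (the dataset id "imdb"
# is an illustrative example, not something this file references):
#
#     from datasets import load_dataset
#     ds = load_dataset("imdb", split="train")
#     print(ds.features, len(ds))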
| 29
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from sequences by adding [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0s for the first segment (and its separators), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
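# Hedged usage sketch (the checkpoint id comes from the maps above):
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     encoding = tokenizer("first segment", "second segment")
#     # encoding["token_type_ids"] marks the first segment with 0s and the
#     # second with 1s, mirroring create_token_type_ids_from_sequences above.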
| 0
| 0
|
def solution() -> int:
    """
    Project Euler problem 9: there is exactly one Pythagorean triplet
    (a, b, c) with a + b + c = 1000; return the product a * b * c.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
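# Hedged optimization sketch (not part of the original solution): substituting
# c = 1000 - a - b into a^2 + b^2 = c^2 gives b = (1000^2 - 2000a) / (2 * (1000 - a)),
# so a single loop over `a` suffices. The helper name is illustrative only.
def solution_closed_form() -> int:
    for a in range(1, 999):
        numerator = 1_000_000 - 2000 * a
        denominator = 2 * (1000 - a)
        if numerator % denominator == 0:
            b = numerator // denominator
            if b > a:
                return a * b * (1000 - a - b)
    raise ValueError("no triplet found")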
if __name__ == "__main__":
print(f"""{solution() = }""")
| 365
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
def __init__( self : Any,lowercase_ : Optional[int],lowercase_ : Optional[int]=1_3,lowercase_ : int=7,lowercase_ : List[str]=True,lowercase_ : str=True,lowercase_ : List[str]=True,lowercase_ : Optional[Any]=True,lowercase_ : Dict=9_9,lowercase_ : Dict=2_4,lowercase_ : Union[str, Any]=2,lowercase_ : str=6,lowercase_ : Dict=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : Any=0.1,lowercase_ : Any=0.1,lowercase_ : Any=5_1_2,lowercase_ : Dict=1_6,lowercase_ : List[str]=2,lowercase_ : Dict=0.02,lowercase_ : Any=3,lowercase_ : Dict=None,lowercase_ : List[str]=1_0_0_0,)-> Optional[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = scope
A__ = range_bbox
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = ids_tensor([self.batch_size, self.seq_length, 4],self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A__ = bbox[i, j, 3]
A__ = bbox[i, j, 1]
A__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A__ = bbox[i, j, 2]
A__ = bbox[i, j, 0]
A__ = t
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : Dict )-> int:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,)
def snake_case__ ( self : Optional[Any],lowercase_ : Tuple,lowercase_ : str,lowercase_ : Optional[int],lowercase_ : Optional[Any],lowercase_ : str,lowercase_ : List[str],lowercase_ : Tuple,)-> Optional[Any]:
'''simple docstring'''
A__ = LiltModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,bbox=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,bbox=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )
def snake_case__ ( self : Any,lowercase_ : Dict,lowercase_ : List[Any],lowercase_ : List[str],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : List[Any],)-> List[str]:
'''simple docstring'''
A__ = self.num_labels
A__ = LiltForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int,lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : Optional[int],lowercase_ : Tuple,lowercase_ : List[str],)-> Any:
'''simple docstring'''
A__ = LiltForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,start_positions=lowercase_,end_positions=lowercase_,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : Optional[int] )-> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : List[str],lowercase_ : str,lowercase_ : Optional[Any],lowercase_ : Optional[Any] )-> Any:
'''simple docstring'''
return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Dict )-> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def snake_case__ ( self : List[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = LiltModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 282
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : str = ["pixel_values"]
def __init__( self : str , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BICUBIC , A : bool = True , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : Dict[str, int] = None , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Dict , ) -> None:
super().__init__(**A )
lowercase_ : Tuple = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase_ : Dict = get_size_dict(A )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase_ : List[Any] = get_size_dict(A , default_to_square=A , param_name='''crop_size''' )
lowercase_ : Optional[Any] = do_resize
lowercase_ : List[str] = do_rescale
lowercase_ : Optional[int] = do_normalize
lowercase_ : List[str] = do_center_crop
lowercase_ : Union[str, Any] = crop_size
lowercase_ : Optional[Any] = size
lowercase_ : Optional[Any] = resample
lowercase_ : Optional[Any] = rescale_factor
lowercase_ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase_ : Tuple = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A ( self : str , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BILINEAR , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
lowercase_ : str = get_size_dict(A )
if "shortest_edge" in size:
lowercase_ : Dict = get_resize_output_image_size(A , size=size['''shortest_edge'''] , default_to_square=A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowercase_ : List[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(A , size=A , resample=A , data_format=A , **A )
def A ( self : str , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
lowercase_ : List[Any] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(A , size=(size['''height'''], size['''width''']) , data_format=A , **A )
def A ( self : Tuple , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Any ) -> np.ndarray:
return rescale(A , scale=A , data_format=A , **A )
def A ( self : Optional[int] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Tuple , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def A ( self : Dict , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : int = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : List[str] , ) -> BatchFeature:
lowercase_ : str = do_resize if do_resize is not None else self.do_resize
lowercase_ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : str = crop_size if crop_size is not None else self.crop_size
lowercase_ : int = get_size_dict(A , param_name='''crop_size''' , default_to_square=A )
lowercase_ : Union[str, Any] = resample if resample is not None else self.resample
lowercase_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : Tuple = image_mean if image_mean is not None else self.image_mean
lowercase_ : Tuple = image_std if image_std is not None else self.image_std
lowercase_ : Union[str, Any] = size if size is not None else self.size
lowercase_ : str = get_size_dict(A )
if not is_batched(A ):
lowercase_ : List[Any] = [images]
if not valid_images(A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase_ : Tuple = [to_numpy_array(A ) for image in images]
if do_resize:
lowercase_ : List[str] = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
lowercase_ : List[Any] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
lowercase_ : List[Any] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
lowercase_ : Dict = [self.normalize(image=A , mean=A , std=A ) for image in images]
lowercase_ : List[str] = [to_channel_dimension_format(A , A ) for image in images]
lowercase_ : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=A , tensor_type=A )
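# Hedged note on the pipeline implemented by the final `preprocess`-style
# method above: resize -> center_crop -> rescale -> normalize, followed by
# conversion to channel-first layout, so one (H, W, 3) uint8 input becomes a
# float array of shape (3, crop_height, crop_width) in BatchFeature["pixel_values"].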
| 33
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : str=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=None , ) -> Tuple:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase = self.get_config()
_UpperCamelCase = 300
return config
def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ) -> List[str]:
_UpperCamelCase = DebertaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ) -> Tuple:
_UpperCamelCase = DebertaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[Any]:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Dict:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) -> List[Any]:
_UpperCamelCase = DebertaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def _UpperCamelCase ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ) -> List[str]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCamelCase )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCamelCase )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Tuple:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DebertaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
pass
    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 256
| 0
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Returns True when digit ``n`` can be placed at (row, column) without
    duplicating it in the same row, column, or 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Finds the next empty cell (marked 0) so a digit can be assigned to it."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Backtracking solver: tries each digit in the next empty cell and recurses;
    returns the solved grid, or None when the grid has no solution.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the tentative assignment and backtrack

    return None


def print_solution(grid: Matrix) -> None:
    """Prints the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
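# Hedged micro-check (illustrative, not from the original file): with the
# initial grid above, placing a second 3 in row 0 must be rejected, while a 1
# at the same cell is allowed.
def _demo_is_safe() -> None:
    assert is_safe(initial_grid, 0, 1, 3) is False  # 3 already sits at (0, 0)
    assert is_safe(initial_grid, 0, 1, 1) is True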
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 371
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
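# Hedged note: the try/except blocks above swap real classes for dummy
# placeholders when optional dependencies are missing. A minimal analogue of
# the guard (illustrative only):
#
#     try:
#         import note_seq  # noqa: F401
#         HAS_NOTE_SEQ = True
#     except ImportError:
#         HAS_NOTE_SEQ = False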
| 78
| 0
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original WavLM model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
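# Hedged CLI sketch (the script filename and paths below are illustrative):
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base-converted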
| 115
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 59
| 0
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
                 hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
                 num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1,
                 initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256,
                 semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
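# Hedged usage sketch:
#
#     config = SegformerConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
#     assert config.model_type == "segformer"
#     assert config.reshape_last_stage is True  # default kept unless overridden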
| 354
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
UpperCAmelCase__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments:
UpperCAmelCase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
UpperCAmelCase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "A csv or a json file containing the validation data."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "The name of the task to train on."} , )
UpperCAmelCase__ : Optional[List[str]] = dataclasses.field(
default=A_ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
UpperCAmelCase__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=A_ , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: see the iteration summary at the end of this file."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
# Sanity checks
snake_case = {}
snake_case = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case = args.train_file
snake_case = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case = args.eval_file
for key in data_files:
snake_case = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
snake_case = extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
snake_case = F'{args.output_dir}/self-train_iter-{{}}'.format
snake_case = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A__ )
os.makedirs(A__ , exist_ok=A__ )
accelerator.wait_for_everyone()
snake_case = None
snake_case = None
snake_case = 0
snake_case = False
# Show the progress bar
snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case = data_dir_format(A__ )
assert os.path.exists(A__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case = os.path.join(A__ , "stage-1" )
snake_case = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A__ , A__ ):
arguments_dict.update({key: value} )
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , A__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case = os.path.join(A__ , "best-checkpoint" )
snake_case = os.path.join(A__ , "stage-2" )
# Update arguments_dict
snake_case = model_path
snake_case = data_files["train"]
snake_case = current_output_dir
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , A__ )
snake_case = iteration
snake_case = data_dir_format(iteration + 1 )
snake_case = AutoConfig.from_pretrained(os.path.join(A__ , "best-checkpoint" ) )
snake_case = config.idalabel
snake_case = os.path.join(A__ , "eval_results_best-checkpoint.json" )
snake_case = os.path.join(A__ , "test_results_best-checkpoint.json" )
assert os.path.exists(A__ )
with open(A__ , "r" ) as f:
snake_case = float(json.load(A__ )[args.eval_metric] )
snake_case = os.path.join(A__ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(A__ )
# Loading the dataset from local csv or json files.
snake_case = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
snake_case = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(A__ , exist_ok=A__ )
shutil.copy(A__ , os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(A__ ):
shutil.copy(A__ , os.path.join(A__ , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(A__ , A__ , A__ , A__ , A__ , A__ )
accelerator.wait_for_everyone()
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case = eval_result
if best_iteration is None:
snake_case = new_iteration
snake_case = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case = new_iteration
snake_case = new_eval_result
snake_case = 0
else:
if new_eval_result == best_eval_result:
snake_case = new_iteration
snake_case = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , A__ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
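
# For orientation, a hedged usage sketch of the self-training loop above.
# In the upstream script this logic lives in a function conventionally
# called `selftrain`; that name, the keyword names, and the file paths
# below are assumptions for illustration, not a confirmed CLI.
#
# selftrain(
#     model_name_or_path="bert-base-uncased",
#     train_file="data/train.csv",      # labeled seed set
#     infer_file="data/unlabeled.csv",  # unlabeled pool to pseudo-label
#     eval_file="data/eval.csv",        # needed when evaluation is enabled
#     output_dir="output",
#     evaluation_strategy="steps",
#     eval_metric="accuracy",
#     max_selftrain_iterations=10,
#     early_stopping_patience=3,
#     early_stopping_threshold=0.0,
#     seed=42,
# )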
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down, memoized) 0/1 knapsack."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solves the 0/1 knapsack problem and reconstructs one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
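
# Both solvers above run in O(n*w) time; the table version also uses O(n*w)
# space. As a point of comparison, a minimal sketch of the same top-down
# recursion with functools.lru_cache standing in for the hand-managed global
# table `f` (the function name `knapsack_memoized` is ours, not part of the
# file above):
from functools import lru_cache


def knapsack_memoized(w: int, wt: list, val: list) -> int:
    """Top-down 0/1 knapsack without a hand-managed memo table."""

    @lru_cache(maxsize=None)
    def best(i: int, cap: int) -> int:
        if i == 0 or cap == 0:
            return 0
        if wt[i - 1] > cap:  # item i does not fit at this capacity
            return best(i - 1, cap)
        # max(skip item i, take item i)
        return max(best(i - 1, cap), best(i - 1, cap - wt[i - 1]) + val[i - 1])

    return best(len(wt), w)


assert knapsack_memoized(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8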
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none is passed in, so that
        # tests like test_switch can supply their own instance.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
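
# Outside the test suite, the scheduler under test is typically used by
# swapping it into a diffusers pipeline via from_config. A minimal, hedged
# sketch; the checkpoint id below is only an example:
#
# from diffusers import DEISMultistepScheduler, StableDiffusionPipeline
#
# pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
#
# # DEIS is a fast multistep solver, so relatively few steps usually suffice.
# image = pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images[0]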
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()

    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
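
# The traversals above are all recursive; for very deep trees an explicit
# stack avoids Python's recursion limit. A small sketch of an iterative
# in-order variant over the same Node class (the helper name is ours):
def inorder_iterative(root):
    """In-order traversal using an explicit stack instead of recursion."""
    output, stack, node = [], [], root
    while stack or node:
        while node:  # walk down to the leftmost node
            stack.append(node)
            node = node.left
        node = stack.pop()  # visit
        output.append(node.data)
        node = node.right  # then traverse the right subtree
    return output


assert inorder_iterative(make_tree()) == [4, 2, 5, 1, 3]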
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
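
# Since solution() depends on a local grid.txt, a quick self-contained check
# of largest_product on a small square grid can be useful. The grid below is
# made up for illustration; the winning line is the row of four 9s.
demo_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 9, 9, 9],
    [1, 1, 2, 1],
]
print(largest_product(demo_grid))  # 6561 == 9 * 9 * 9 * 9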