"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'yolos'
def __init__(self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=[512, 864] , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=100 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=0.1 , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = num_detection_tokens
_lowerCAmelCase = use_mid_position_embeddings
_lowerCAmelCase = auxiliary_loss
# Hungarian matcher
_lowerCAmelCase = class_cost
_lowerCAmelCase = bbox_cost
_lowerCAmelCase = giou_cost
# Loss coefficients
_lowerCAmelCase = bbox_loss_coefficient
_lowerCAmelCase = giou_loss_coefficient
_lowerCAmelCase = eos_coefficient
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = version.parse('1.11' )
@property
def A__ (self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ (self ):
'''simple docstring'''
return 1e-4
@property
def A__ (self ):
'''simple docstring'''
return 12
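# Usage sketch (hedged example; `YolosConfig` is exported from the `transformers`
# package, and the values below follow the defaults above):
#   >>> from transformers import YolosConfig
#   >>> config = YolosConfig()
#   >>> config.image_size
#   [512, 864]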
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase ( __lowercase ):
@slow
@require_torch
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
_lowerCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_lowerCAmelCase = bertabert.config.encoder.vocab_size
_lowerCAmelCase = tokenizer.sep_token_id
_lowerCAmelCase = tokenizer.cls_token_id
_lowerCAmelCase = 128
_lowerCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
_lowerCAmelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
_lowerCAmelCase = train_dataset.select(range(32 ) )
_lowerCAmelCase = val_dataset.select(range(16 ) )
_lowerCAmelCase = 4
def _map_to_encoder_decoder_inputs(lowerCamelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCAmelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowerCamelCase , max_length=512 )
_lowerCAmelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowerCamelCase , max_length=128 )
_lowerCAmelCase = inputs.input_ids
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = outputs.input_ids
_lowerCAmelCase = outputs.input_ids.copy()
_lowerCAmelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_lowerCAmelCase = outputs.attention_mask
assert all(len(lowerCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(lowerCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCamelCase ):
_lowerCAmelCase = pred.label_ids
_lowerCAmelCase = pred.predictions
# all unnecessary tokens are removed
_lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
_lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
_lowerCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCamelCase ) )] ) / len(lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
_lowerCAmelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCamelCase , batch_size=lowerCamelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_lowerCAmelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCamelCase , batch_size=lowerCamelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = SeqaSeqTrainingArguments(
output_dir=lowerCamelCase , per_device_train_batch_size=lowerCamelCase , per_device_eval_batch_size=lowerCamelCase , predict_with_generate=lowerCamelCase , evaluation_strategy="""steps""" , do_train=lowerCamelCase , do_eval=lowerCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowerCAmelCase = SeqaSeqTrainer(
model=lowerCamelCase , args=lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , tokenizer=lowerCamelCase , )
# start training
trainer.train()
"""Principal Component Analysis and Linear Discriminant Analysis on column-sample matrices."""
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row NumPy array into a column NumPy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions` ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
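# Usage sketch: with `features` as an (n_features, n_samples) matrix, PCA keeps the
# requested number of dimensions and preserves the sample count.
#   >>> features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 1.0, 1.0]])
#   >>> principal_component_analysis(features, dimensions=2).shape
#   (2, 4)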
"""Accelerate example: training with gradient accumulation on GLUE MRPC."""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
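# Usage sketch (assumptions: the file is saved as gradient_accumulation.py and
# `accelerate config` has been run once for the machine):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16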
"""simple docstring"""
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
"""Solve Coulomb's law for the missing quantity among force, charges and distance."""
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law, F = k * q1 * q2 / d**2: given any three of the four
    quantities (pass the unknown one as 0), solve for the fourth.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
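# Usage sketch (F = k * q1 * q2 / d**2 with k = 8.988e9):
#   >>> coulombs_law(force=0, charge1=3, charge2=5, distance=2000)
#   {'force': 33705.0}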
"""Sum the digits of a factorial (Project Euler problem 20)."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number num!."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
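# Usage sketch: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#   >>> solution(10)
#   27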
def abbr(a: str, b: str) -> bool:
    """
    Return True if `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting all remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    # dp[i][j] is True if the first i characters of `a` can produce the first j of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
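# Usage sketch: "daBcd" matches "ABC" by capitalizing 'a' and 'c' and deleting
# the remaining lowercase letters.
#   >>> abbr("daBcd", "ABC")
#   True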
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> str:
'''simple docstring'''
if not isinstance(snake_case , snake_case ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(snake_case , snake_case ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
UpperCAmelCase__ : str = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(snake_case )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
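# Usage sketch:
#   >>> fizz_buzz(number=1, iterations=7)
#   '1 2 Fizz 4 Buzz Fizz 7 '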
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase__ : Tuple = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Union[str, Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
"""Tests for the text-generation pipeline."""
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # An empty prompt is slightly special: it requires the BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it works even
        # without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models; they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # The hole strategy cannot work past the model's maximum length
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
"""Tests for the MarkupLM feature extractor (HTML -> nodes and xpaths)."""
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings() -> list:
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :float , a_ :float , a_ :float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError('''You cannot supply more or less than 2 values''')
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''')
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
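# Usage sketch (mass-action law: electron_conc * hole_conc == intrinsic_conc**2):
#   >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   ('intrinsic_conc', 50.0)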
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : Tuple = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
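# Usage sketch: for a triangle with weights 1, 2 and 3, the minimum spanning tree
# keeps the two lightest edges, saving a total weight of 3.
#   >>> g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   >>> sum(g.edges.values()) - sum(g.prims_algorithm().edges.values())
#   3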
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
def solution(n: int = 1000) -> int:
    """
    For 3 <= a <= n, sum the maximum remainder r_max of (a - 1)**k + (a + 1)**k
    divided by a**2, which equals 2*a*((a - 1) // 2) (Project Euler problem 120).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
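# Usage sketch: for a = 3..10 the summands 2*a*((a - 1)//2) are
# 6, 8, 20, 24, 42, 48, 72, 80, which sum to 300.
#   >>> solution(10)
#   300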
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput

SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's `step` method."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Mixin with common scheduler loading and saving functionality."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
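# Usage sketch (hedged; any concrete scheduler in `diffusers` inherits this
# mixin, and "google/ddpm-cat-256" is one published checkpoint carrying a
# scheduler config):
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
#   scheduler.save_pretrained("./my-scheduler")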
"""Informer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
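# Usage sketch: a small configuration for monthly series, overriding only the
# forecasting horizon (all other arguments keep the defaults above).
#   >>> config = InformerConfig(prediction_length=12)
#   >>> config.d_model
#   64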
"""Release utility: bump version strings across the repository."""
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
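# Usage sketch (assumption: the script lives at utils/release.py of a
# transformers checkout and is run from the repository root):
#   python utils/release.py                  # prepare a minor release
#   python utils/release.py --patch          # prepare a patch release
#   python utils/release.py --post_release   # move back to a .dev0 version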
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ =42
lowerCAmelCase__ =jnp.floataa
lowerCAmelCase__ =True
def UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setup()
snake_case__ : int =nn.Dense(5 , dtype=self.dtype )
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
snake_case__ : int =super().__call__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ =FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels over the class axis, then take the
        # negative log-likelihood under a log-softmax of the logits
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
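# Hedged usage sketch (not part of the original script): the shapes below (batch=2,
# seq_len=8, 5 answer categories) are illustrative assumptions, chosen only to show
# that the combined loss above reduces to a scalar average of three cross-entropy terms.
def _demo_nq_loss():
    key = jax.random.PRNGKey(0)
    start_logits = jax.random.normal(key, (2, 8))   # (batch, seq_len)
    end_logits = jax.random.normal(key, (2, 8))
    pooled_logits = jax.random.normal(key, (2, 5))  # (batch, num_categories)
    loss = calculate_loss_for_nq(
        start_logits, jnp.array([1, 3]),
        end_logits, jnp.array([4, 6]),
        pooled_logits, jnp.array([0, 2]),
    )
    return loss  # scalar jnp array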
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)  # split each array across devices
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        # pad every sequence to max_length and build the matching attention mask
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
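# Hedged sketch (assumed names, not in the original file): wiring the generator above to
# the DataCollator yields padded, sharded, device-ready batches for the pmapped steps below.
def _demo_batches(dataset, collator, batch_size=8, seed=0):
    for raw_batch in get_batched_dataset(dataset, batch_size, seed=seed):
        yield collator(raw_batch)  # pads to max_length, converts to jnp, shards per device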
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels,
            end_logits, end_labels,
            pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")  # average gradients across devices

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq)
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # flattened keys are tuples of path components; decay everything except
        # biases and LayerNorm scales
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params.keys()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
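# Hedged sketch (the step counts are illustrative): building the optimizer and probing
# the warmup-then-linear-decay schedule that build_tx assembles via optax.join_schedules.
def _demo_build_tx():
    tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.0095)
    # lr is a callable schedule: step -> learning rate
    assert float(lr(0)) == 0.0  # starts at init_lr
    peak = float(lr(100))       # reaches lr at the warmup boundary
    late = float(lr(999))       # then decays linearly toward ~1e-7
    return tx, peak, late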
| 408
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__lowercase : str = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_snake_case , cache_dir=_snake_case )
__lowercase : List[Any] = [t[-1] for t in os.walk(os.path.join(_snake_case , os.listdir(_snake_case )[0] , '''snapshots''' ) )]
__lowercase : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : str ):
__lowercase , __lowercase : int = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_snake_case )
__lowercase : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase : Optional[int] = jax.random.PRNGKey(0 )
__lowercase : str = 4
__lowercase : List[Any] = jax.device_count()
__lowercase : Optional[Any] = num_samples * [prompt]
__lowercase : Union[str, Any] = pipeline.prepare_inputs(_snake_case )
# shard inputs and rng
__lowercase : int = replicate(_snake_case )
__lowercase : List[Any] = jax.random.split(_snake_case , _snake_case )
__lowercase : Any = shard(_snake_case )
__lowercase : Optional[Any] = pipeline(_snake_case , _snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(_snake_case , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
__lowercase : Dict = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_snake_case ) == num_samples
def snake_case_ ( self : Optional[int] ):
__lowercase , __lowercase : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=_snake_case )
__lowercase : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase : str = jax.random.PRNGKey(0 )
__lowercase : List[str] = 50
__lowercase : Union[str, Any] = jax.device_count()
__lowercase : Optional[int] = num_samples * [prompt]
__lowercase : str = pipeline.prepare_inputs(_snake_case )
# shard inputs and rng
__lowercase : Optional[int] = replicate(_snake_case )
__lowercase : Tuple = jax.random.split(_snake_case , _snake_case )
__lowercase : List[Any] = shard(_snake_case )
__lowercase : Optional[int] = pipeline(_snake_case , _snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(_snake_case , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def snake_case_ ( self : Tuple ):
__lowercase , __lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_snake_case )
__lowercase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase : List[str] = jax.random.PRNGKey(0 )
__lowercase : Optional[int] = 50
__lowercase : Union[str, Any] = jax.device_count()
__lowercase : Optional[int] = num_samples * [prompt]
__lowercase : Dict = pipeline.prepare_inputs(_snake_case )
# shard inputs and rng
__lowercase : str = replicate(_snake_case )
__lowercase : List[str] = jax.random.split(_snake_case , _snake_case )
__lowercase : List[str] = shard(_snake_case )
__lowercase : Optional[int] = pipeline(_snake_case , _snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(_snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def snake_case_ ( self : Optional[Any] ):
__lowercase , __lowercase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__lowercase : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase : int = jax.random.PRNGKey(0 )
__lowercase : str = 50
__lowercase : Optional[Any] = jax.device_count()
__lowercase : Optional[int] = num_samples * [prompt]
__lowercase : Any = pipeline.prepare_inputs(_snake_case )
# shard inputs and rng
__lowercase : Any = replicate(_snake_case )
__lowercase : Dict = jax.random.split(_snake_case , _snake_case )
__lowercase : Any = shard(_snake_case )
__lowercase : str = pipeline(_snake_case , _snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(_snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[str] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=_snake_case , steps_offset=1 , )
__lowercase , __lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=_snake_case , safety_checker=_snake_case , )
__lowercase : Optional[int] = scheduler.create_state()
__lowercase : Any = scheduler_state
__lowercase : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase : str = jax.random.PRNGKey(0 )
__lowercase : List[str] = 50
__lowercase : List[Any] = jax.device_count()
__lowercase : List[str] = num_samples * [prompt]
__lowercase : str = pipeline.prepare_inputs(_snake_case )
# shard inputs and rng
__lowercase : Tuple = replicate(_snake_case )
__lowercase : Any = jax.random.split(_snake_case , _snake_case )
__lowercase : Optional[int] = shard(_snake_case )
__lowercase : Dict = pipeline(_snake_case , _snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(_snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def snake_case_ ( self : Optional[Any] ):
__lowercase : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase : Optional[int] = jax.device_count()
__lowercase : int = num_samples * [prompt]
__lowercase : Optional[Any] = jax.random.split(jax.random.PRNGKey(0 ) , _snake_case )
__lowercase , __lowercase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_snake_case , )
__lowercase : str = replicate(_snake_case )
__lowercase : Any = pipeline.prepare_inputs(_snake_case )
__lowercase : Dict = shard(_snake_case )
__lowercase : Optional[int] = pipeline(_snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__lowercase : List[Any] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__lowercase , __lowercase : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_snake_case , use_memory_efficient_attention=_snake_case , )
__lowercase : Tuple = replicate(_snake_case )
__lowercase : List[str] = pipeline.prepare_inputs(_snake_case )
__lowercase : Any = shard(_snake_case )
__lowercase : str = pipeline(_snake_case , _snake_case , _snake_case , jit=_snake_case ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__lowercase : List[Any] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
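# Hedged recap sketch (not one of the original tests): the replicate/shard/pmap
# data-parallel pattern the tests above repeat. `pipeline` and `params` stand in for
# the pair returned by FlaxStableDiffusionPipeline.from_pretrained.
def _demo_data_parallel_inference(pipeline, params, prompt, num_inference_steps=50):
    num_samples = jax.device_count()
    prompt_ids = pipeline.prepare_inputs(num_samples * [prompt])
    params = replicate(params)                                   # copy weights to every device
    prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
    prompt_ids = shard(prompt_ids)                               # split the batch across devices
    images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
    return images  # shape: (num_devices, batch_per_device, H, W, 3)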
| 509
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # keep backward compatibility with the historical misspelled kwarg
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
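# Hedged usage sketch: a rope_scaling dict that passes _rope_scaling_validation above
# needs exactly a `type` in {"linear", "dynamic"} and a float `factor` strictly above 1.
def _demo_rope_scaling():
    return OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})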
| 509
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 712
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class A_ :
'''simple docstring'''
__snake_case = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__snake_case = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class A_ :
'''simple docstring'''
__snake_case = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__snake_case = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__snake_case = field(
default=__UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def UpperCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
__lowerCamelCase : List[Any] = import_module('tasks' )
try:
__lowerCamelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , model_args.task_type )
__lowerCamelCase : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__lowerCamelCase : Dict = token_classification_task.get_labels(data_args.labels )
__lowerCamelCase : Dict[int, str] = dict(enumerate(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} , cache_dir=model_args.cache_dir , )
__lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__lowerCamelCase : List[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , )
# Get datasets
__lowerCamelCase : Dict = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE__ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__lowerCamelCase : Any = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE__ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple[List[int], List[int]]:
__lowerCamelCase : Any = np.argmax(SCREAMING_SNAKE_CASE__ , axis=2 )
__lowerCamelCase , __lowerCamelCase : List[str] = preds.shape
__lowerCamelCase : List[str] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase : List[str] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(SCREAMING_SNAKE_CASE__ ) -> Dict:
__lowerCamelCase , __lowerCamelCase : List[str] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
"precision": precision_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
"recall": recall_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
"f1": fa_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
}
# Data collator
__lowerCamelCase : Any = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
__lowerCamelCase : Union[str, Any] = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , compute_metrics=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCamelCase : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCamelCase : int = trainer.evaluate()
__lowerCamelCase : Optional[int] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
writer.write('%s = %s\n' % (key, value) )
results.update(SCREAMING_SNAKE_CASE__ )
# Predict
if training_args.do_predict:
__lowerCamelCase : Optional[Any] = TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE__ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = trainer.predict(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = align_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__lowerCamelCase : Union[str, Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return results
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
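# Hedged invocation sketch (paths and the model name are placeholders, not from the
# original script): the fields of the two dataclasses above become CLI flags, e.g.
#
#   python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --task_type NER \
#     --data_dir ./data \
#     --labels ./data/labels.txt \
#     --output_dir ./out \
#     --do_train --do_eval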
| 230
| 0
|
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring that appears in both input strings."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
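# Hedged usage sketch: the DP table tracks the longest suffix match ending at (i, j), so
# the scan finds the answer in O(len(text1) * len(text2)) time.
def _demo_longest_common_substring():
    assert longest_common_substring("programming", "gaming") == "ming"
    return "ming"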
| 547
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
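# Hedged usage sketch (the checkpoint name is an assumption): text plus image inputs
# yield input_ids, attention_mask and pixel_values, while a visual prompt instead
# yields conditional_pixel_values.
def _demo_clipseg_processor(image):
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    return processor(text=["a cat", "a dog"], images=[image, image], padding=True, return_tensors="pt")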
| 547
| 1
|
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    """Sum all primes strictly below `n` using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = prime candidate, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"{solution() = }")
| 165
|
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Return the cheapest top-left to bottom-right path cost, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
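# Hedged usage sketch: with only right and down moves, the cheapest path through this
# grid is 1 -> 3 -> 1 -> 1 -> 1, the classic minimum-path-sum example.
def _demo_min_path_sum():
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(grid) == 7
    return 7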
| 165
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
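# Hedged usage sketch: the defaults mirror a bert-base text stack plus a 512-d visual
# embedding, so a from-scratch config only needs the fields you want to override.
def _demo_visual_bert_config():
    config = VisualBertConfig(visual_embedding_dim=512, num_hidden_layers=6)
    return config  # pass to a VisualBert model class for a randomly initialized model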
| 505
|
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_A = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_A = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_A = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 505
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP checkpoint keys to transformers naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: shift the shard index to a 0-based layer number
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
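# Hedged sanity check (not in the original script): the per-element byte sizes that
# get_dtype_size reports, used below to accumulate the sharded index's total_size.
def _demo_dtype_sizes():
    assert get_dtype_size(torch.float32) == 4
    assert get_dtype_size(torch.float16) == 2
    assert get_dtype_size(torch.bool) == 1 / 8
    return True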
def snake_case ( snake_case : List[Any] , snake_case : str , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
if bloom_config_file == "":
lowerCAmelCase = BloomConfig()
else:
lowerCAmelCase = BloomConfig.from_json_file(snake_case )
if shard_model:
lowerCAmelCase = os.listdir(snake_case )
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names (layer_name_mapping is defined earlier in this script)
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names (layer_name_mapping is defined earlier in this script)
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
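# Illustrative sketch (not part of the original script): how the two tensor-parallel
# merge rules above behave. Layer-norm-like weights are averaged across TP ranks, while
# row/column-parallel linear weights are concatenated along dim 1 or 0. The tensor
# shapes below are made up for the demo.
def _tp_merge_demo():
    rank_a = {"ln.weight": torch.ones(4), "dense.weight": torch.ones(4, 2)}
    rank_b = {"ln.weight": torch.ones(4) * 3, "dense.weight": torch.zeros(4, 2)}
    merged = {
        # averaging rule: (1 + 3) / 2 -> all entries equal 2.0
        "ln.weight": (rank_a["ln.weight"] + rank_b["ln.weight"]) / 2,
        # concatenation rule for a row-parallel weight: shape (4, 2) + (4, 2) -> (4, 4)
        "dense.weight": torch.cat([rank_a["dense.weight"], rank_b["dense.weight"]], dim=1),
    }
    return merged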
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
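# Example invocation (hypothetical script name and paths, shown for illustration only):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoints \
#       --pytorch_dump_folder_path /path/to/output \
#       --shard_model \
#       --pretraining_tp 4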
| 704
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """
    Returns a mapping from utf-8 bytes to printable unicode strings. BPE works on unicode strings,
    so whitespace/control bytes are remapped to avoid breaking the BPE code.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
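# Illustrative check (not part of the original file): the mapping is a bijection over
# all 256 byte values, which is what lets byte-level BPE round-trip arbitrary text.
def _bytes_to_unicode_demo():
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256
    assert byte_encoder[ord("A")] == "A"  # printable bytes map to themselves
    assert byte_encoder[ord(" ")] == "\u0120"  # space maps to the printable proxy "Ġ"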
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
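# Example (illustrative): get_pairs(("h", "e", "l", "l", "o"))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}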
class BartTokenizer(PreTrainedTokenizer):
    """
    Constructs a BART tokenizer, which uses byte-level Byte-Pair-Encoding (the same scheme as GPT-2/RoBERTa).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
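# Hedged usage sketch (not part of the original module; downloads one of the Hub
# checkpoints listed in PRETRAINED_VOCAB_FILES_MAP above):
# from transformers import BartTokenizer
# tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
# ids = tokenizer("Hello world")["input_ids"]
# assert tokenizer.decode(ids, skip_special_tokens=True) == "Hello world"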
| 514
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
_import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
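# Illustrative note (not part of the original file): because of the _LazyModule
# indirection above, importing a light symbol does not pull in the heavy torch/TF/flax
# modeling files; those submodules load on first attribute access.
# from transformers.models.clip import CLIPConfig  # only imports the config submodule
# config = CLIPConfig()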
| 316
|
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
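# Hedged usage sketch (assumes a working CUDA toolchain with nvcc on PATH; the JIT
# build runs once on the first call and is cached by torch.utils.cpp_extension):
# MSDA = load_cuda_kernels()
# out = MSDA.ms_deform_attn_forward(...)  # hypothetical call, see the kernel sources for the real signature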
| 316
| 1
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
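# Hedged usage sketch (not part of the original module; requires network access to
# download a detection checkpoint such as "facebook/detr-resnet-50"):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]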
| 686
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate, using the learning rate set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """
    Piecewise constant schedule. `step_rules` is a string like "1:10,0.1:20,0.01:30,0.005": multiplier 1
    until step 10, 0.1 until step 20, 0.01 until step 30, then 0.005 for all remaining steps.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup over `num_warmup_steps`, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
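# Minimal usage sketch (illustrative; the parameter, lr, and step counts are made up):
# import torch
# params = [torch.nn.Parameter(torch.zeros(1))]
# optimizer = torch.optim.AdamW(params, lr=1e-3)
# lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
# for _ in range(100):
#     optimizer.step()      # one training step ...
#     lr_scheduler.step()   # ... then advance the schedule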
| 686
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a MarkupLM model. Defaults mirror the
    microsoft/markuplm-base architecture.
    """

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
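# Illustrative instantiation (not part of the original file): any keyword can be
# overridden while the rest keep the markuplm-base defaults.
# config = MarkupLMConfig(max_depth=64)
# assert config.hidden_size == 768 and config.max_depth == 64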
| 536
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
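# Illustrative check (not part of the original file): dividing by keep_prob keeps the
# expected activation unchanged, so inference (training=False) needs no rescaling.
def _drop_path_expectation_demo():
    x = torch.ones(10_000, 8)
    out = drop_path(x, drop_prob=0.2, training=True)
    # Roughly 80% of rows survive, each scaled by 1/0.8, so the mean stays near 1.0.
    return out.mean().item()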
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a strided convolution."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor of shape [B, C, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # token mixing by average pooling; subtracting the input makes this a residual-style operator
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"

POOLFORMER_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n"


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
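# Hedged usage sketch (not part of the original module; downloads the checkpoint
# referenced by the docstrings above and assumes `image` is a PIL.Image):
# from transformers import AutoImageProcessor, PoolFormerForImageClassification
# processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
# model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
# inputs = processor(images=image, return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])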
| 536
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 715
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                # save and reload the scheduler state mid-run to check it round-trips
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """See https://github.com/huggingface/transformers/issues/21689"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 445
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 41
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
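
# Hedged usage sketch (not part of the original file): with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the property above multiplies out to
# 5 * 2**6 = 320, i.e. the feature encoder emits one frame per 320 input samples.
#
#     config = SEWDConfig()
#     assert config.inputs_to_logits_ratio == 320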
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
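
# Hedged usage sketch (not part of the original file): the layout branch runs at
# hidden_size // channel_shrink_ratio channels, so with the defaults:
#
#     config = LiltConfig()
#     print(config.hidden_size // config.channel_shrink_ratio)  # 768 // 4 == 192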
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)"
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training."
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
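
# Hedged invocation sketch (not part of the original file): the flags come from
# parse_args() above; the script name and the gs:// paths are hypothetical placeholders.
#
#     python train_mlm_tpu.py \
#         --pretrained_model_config roberta-base \
#         --tokenizer unigram-tokenizer-wikitext \
#         --train_dataset gs://my-bucket/train \
#         --eval_dataset gs://my-bucket/eval \
#         --bfloat16 \
#         --output_dir ./mlm-checkpoints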
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
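
# Hedged invocation sketch (not part of the original file): the flags come from the
# parser above; the script name and the paths are hypothetical placeholders.
#
#     python convert_mobilebert_checkpoint.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin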
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
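
# Hedged note (not part of the original file): with a hypothetical path to this
# module, the suite above can be run selectively, e.g.
#
#     python -m pytest test_modeling_open_llama.py -k "rope_scaling" -v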
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    # A number longer than three digits can only be truncatable if its leading and
    # trailing three-digit chunks are themselves prime.
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
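
# Worked example (not part of the original file): 3797 is truncatable in both
# directions -- 3797, 797, 97, 7 and 3797, 379, 37, 3 are all prime -- so it is one
# of the eleven numbers collected by compute_truncated_primes(11).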
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


# modified from fairseq's Dictionary class
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
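
# Hedged invocation sketch (not part of the original file): the flags come from the
# parser above; the script name and the paths are hypothetical placeholders.
#
#     python convert_biogpt_checkpoint.py \
#         --biogpt_checkpoint_path ./biogpt-fairseq \
#         --pytorch_dump_folder_path ./biogpt-hf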
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
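
# Hedged usage sketch (not part of the original file): instantiating the config and
# inspecting the ONNX input axes; the BertOnnxConfig constructor call is an assumption.
#
#     config = BertConfig(num_hidden_layers=6)
#     print(config.model_type)          # "bert"
#     onnx_config = BertOnnxConfig(config)
#     print(list(onnx_config.inputs))   # ["input_ids", "attention_mask", "token_type_ids"]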
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , )-> int:
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self )-> str:
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Union[str, Any]:
__A = OpenAIGPTModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , head_mask=UpperCAmelCase )
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
__A = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Tuple:
__A = OpenAIGPTLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Dict:
__A = OpenAIGPTDoubleHeadsModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> List[Any]:
__A = self.num_labels
__A = OpenAIGPTForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self )-> Tuple:
__A = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) = config_and_inputs
__A = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase( _a , _a , _a , unittest.TestCase):
"""simple docstring"""
lowerCamelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase__ = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    # Valid forms: 0XXXXXXXXX, 94XXXXXXXXX, +94XXXXXXXXX or 0094XXXXXXXXX,
    # where the mobile prefix is 70-72 or 74-78.
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
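    # A few extra illustrative checks (the sample numbers below are made up):
    # an optional 0 / 94 / +94 / 0094 prefix, a 7x mobile prefix, an optional
    # separator, then exactly seven further digits.
    assert is_sri_lankan_phone_number("+94717111111")
    assert is_sri_lankan_phone_number("075 3411111")
    assert not is_sri_lankan_phone_number("1234567890")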
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()

if __name__ == "__main__":
main()
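
# Typical invocations once this entry point is installed as `transformers-cli`
# (the subcommand names below assume the usual registration, e.g.
# EnvironmentCommand registering as `env` and DownloadCommand as `download`):
#   transformers-cli env
#   transformers-cli download <model-id>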
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
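
# Example: with HEURISTIC == 0 (euclidean) a node at (y=3, x=4) whose goal is
# (y=0, x=0) gets h = sqrt(3**2 + 4**2) = 5.0; with HEURISTIC == 1 (manhattan)
# it would get h = 3 + 4 = 7.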

class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path

class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path

if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
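    # Sanity check (illustrative): on the grid above, both searches should
    # return a path that starts at `init` and ends at `goal`.
    assert path[0] == init and path[-1] == goal
    assert bd_path[0] == init and bd_path[-1] == goal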
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
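
# Because of `attribute_map`, the generic config names resolve to the GPT-J
# ones: e.g. `GPTJConfig().hidden_size` and `GPTJConfig().n_embd` both return
# 4096 with the defaults above.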

class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_snake_case = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_snake_case = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
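
# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.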
class lowerCAmelCase_ ( _lowercase ):
"""simple docstring"""
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="__start__" , _SCREAMING_SNAKE_CASE="__end__" , _SCREAMING_SNAKE_CASE="__unk__" , _SCREAMING_SNAKE_CASE="__null__" , **_SCREAMING_SNAKE_CASE , ) -> Any:
super().__init__(unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase = json.load(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle:
__UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
__UpperCamelCase = [tuple(merge.split() ) for merge in merges]
__UpperCamelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__UpperCamelCase = {}
@property
def __lowercase( self ) -> int:
return len(self.encoder )
def __lowercase( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> str:
if token in self.cache:
return self.cache[token]
__UpperCamelCase = re.sub('([.,!?()])' , r' \1' , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = re.sub('(\')' , r' \1 ' , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = re.sub(r'\s{2,}' , ' ' , _SCREAMING_SNAKE_CASE )
if "\n" in token:
__UpperCamelCase = token.replace('\n' , ' __newln__' )
__UpperCamelCase = token.split(' ' )
__UpperCamelCase = []
for token in tokens:
if not len(_SCREAMING_SNAKE_CASE ):
continue
__UpperCamelCase = token.lower()
__UpperCamelCase = tuple(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__UpperCamelCase = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
words.append(_SCREAMING_SNAKE_CASE )
continue
while True:
__UpperCamelCase = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCamelCase , __UpperCamelCase = bigram
__UpperCamelCase = []
__UpperCamelCase = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
__UpperCamelCase = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
new_word.extend(word[i:j] )
__UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCamelCase = tuple(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
__UpperCamelCase = get_pairs(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = '@@ '.join(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = word[:-4]
__UpperCamelCase = word
words.append(_SCREAMING_SNAKE_CASE )
return " ".join(_SCREAMING_SNAKE_CASE )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
__UpperCamelCase = []
__UpperCamelCase = re.findall(r'\S+\n?' , _SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) )
return split_tokens
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> int:
__UpperCamelCase = token.lower()
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> str:
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> str:
__UpperCamelCase = ' '.join(_SCREAMING_SNAKE_CASE ).replace('@@ ' , '' ).strip()
return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase_ :
"""simple docstring"""
@staticmethod
def __lowercase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
pass
def _a ( __lowercase ) -> str:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_snake_case = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
__UpperCamelCase = pipeline(
'document-question-answering' , model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = list(zip(*apply_tesseract(load_image(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , '' ) ) )
__UpperCamelCase = 'What is the placebo?'
__UpperCamelCase = [
{
'image': load_image(_SCREAMING_SNAKE_CASE ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__UpperCamelCase = dqa_pipeline(_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{'score': ANY(_SCREAMING_SNAKE_CASE ), 'answer': ANY(_SCREAMING_SNAKE_CASE ), 'start': ANY(_SCREAMING_SNAKE_CASE ), 'end': ANY(_SCREAMING_SNAKE_CASE )},
{'score': ANY(_SCREAMING_SNAKE_CASE ), 'answer': ANY(_SCREAMING_SNAKE_CASE ), 'start': ANY(_SCREAMING_SNAKE_CASE ), 'end': ANY(_SCREAMING_SNAKE_CASE )},
]
]
* 3 , )
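# Each call above yields one list per input example; every list holds `top_k`
# dicts shaped like {"score": float, "answer": str, "start": int, "end": int}.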
@require_torch
@require_detectron2
@require_pytesseract
def __lowercase( self ) -> Dict:
__UpperCamelCase = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'How many cats are there?'
__UpperCamelCase = [
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , _SCREAMING_SNAKE_CASE )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__UpperCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(_SCREAMING_SNAKE_CASE , [] )
# We can optionally pass the words and bounding boxes directly
__UpperCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , words=_SCREAMING_SNAKE_CASE , boxes=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(_SCREAMING_SNAKE_CASE , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def __lowercase( self ) -> str:
__UpperCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def __lowercase( self ) -> int:
__UpperCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_SCREAMING_SNAKE_CASE , revision='3dc6de3' , )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
__UpperCamelCase = list(zip(*apply_tesseract(load_image(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , '' ) ) )
# This model should also work if `image` is set to None
__UpperCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowercase( self ) -> Dict:
__UpperCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_SCREAMING_SNAKE_CASE , revision='3dc6de3' , max_seq_len=50 , )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
__UpperCamelCase = list(zip(*apply_tesseract(load_image(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , '' ) ) )
# This model should also work if `image` is set to None
__UpperCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def __lowercase( self ) -> Union[str, Any]:
__UpperCamelCase = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=_SCREAMING_SNAKE_CASE , question=_SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def __lowercase( self ) -> Optional[Any]:
pass
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series as strings: '1', '1/2', ..., '1/n'."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
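    # Example: harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']
    assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]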
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
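    # Quick check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
    assert solution(10) == 27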
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """yolos"""
def __init__( self , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-1_2 , __lowerCamelCase=[512, 864] , __lowerCamelCase=16 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=100 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=1 , __lowerCamelCase=5 , __lowerCamelCase=2 , __lowerCamelCase=5 , __lowerCamelCase=2 , __lowerCamelCase=0.1 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
__A : Dict = hidden_size
__A : List[Any] = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : Union[str, Any] = intermediate_size
__A : int = hidden_act
__A : Optional[Any] = hidden_dropout_prob
__A : List[Any] = attention_probs_dropout_prob
__A : Optional[Any] = initializer_range
__A : Optional[int] = layer_norm_eps
__A : str = image_size
__A : Tuple = patch_size
__A : Dict = num_channels
__A : Optional[int] = qkv_bias
__A : str = num_detection_tokens
__A : Optional[Any] = use_mid_position_embeddings
__A : Union[str, Any] = auxiliary_loss
# Hungarian matcher
__A : List[Any] = class_cost
__A : int = bbox_cost
__A : Optional[int] = giou_cost
# Loss coefficients
__A : Tuple = bbox_loss_coefficient
__A : List[str] = giou_loss_coefficient
__A : List[Any] = eos_coefficient
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse("""1.11""" )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 1e-4
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 12
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : List[Any] = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : str = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : int = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
__A : int = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__A : List[Any] = DDPMScheduler()
__A : Optional[Any] = AudioDiffusionPipeline(vqvae=__lowerCamelCase , unet=self.dummy_unet , mel=__lowerCamelCase , scheduler=__lowerCamelCase )
__A : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : List[str] = pipe(generator=__lowerCamelCase , steps=4 )
__A : Union[str, Any] = output.audios[0]
__A : str = output.images[0]
__A : List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : Any = pipe(generator=__lowerCamelCase , steps=4 , return_dict=__lowerCamelCase )
__A : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__A : Tuple = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
__A : Any = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__A : int = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__A : Any = DDIMScheduler()
__A : Optional[Any] = self.dummy_vqvae_and_unet
__A : int = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__lowerCamelCase , scheduler=__lowerCamelCase )
__A : Any = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
np.random.seed(0 )
__A : str = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__A : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : int = pipe(raw_audio=__lowerCamelCase , generator=__lowerCamelCase , start_step=5 , steps=10 )
__A : Any = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__A : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : Optional[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__A : Union[str, Any] = self.dummy_unet_condition
__A : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__lowerCamelCase , mel=__lowerCamelCase , scheduler=__lowerCamelCase )
__A : Dict = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
np.random.seed(0 )
__A : int = torch.rand((1, 1, 10) )
__A : Optional[Any] = pipe(generator=__lowerCamelCase , encoding=__lowerCamelCase )
__A : str = output.images[0]
__A : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : Tuple = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = torch_device
__A : List[Any] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
__A : List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(42 )
__A : str = pipe(generator=__lowerCamelCase )
__A : Dict = output.audios[0]
__A : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__A : Optional[int] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__A : Optional[Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase ( __A , __A):
@register_to_config
def __init__( self , *,
snake_case_ = 4 , snake_case_ = 7_68 , snake_case_ , snake_case_ , ):
super().__init__()
_snake_case : List[str] = nn.Parameter(torch.zeros(UpperCamelCase__ ) )
# parameters for additional clip time embeddings
_snake_case : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
_snake_case : str = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
# parameters for encoder hidden states
_snake_case : str = clip_extra_context_tokens
_snake_case : str = nn.Linear(
UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
_snake_case : Tuple = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
_snake_case : Union[str, Any] = nn.LayerNorm(UpperCamelCase__ )
def lowerCamelCase__ ( self , *, snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_snake_case : Union[str, Any] = image_embeddings.shape[0]
_snake_case : List[str] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_snake_case : Union[str, Any] = classifier_free_guidance_embeddings.expand(
UpperCamelCase__ , -1 )
_snake_case : Union[str, Any] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_snake_case : Union[str, Any] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_snake_case : List[Any] = self.embedding_proj(UpperCamelCase__ )
_snake_case : Optional[Any] = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ )
_snake_case : Tuple = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_snake_case : Optional[Any] = self.clip_extra_context_tokens_proj(UpperCamelCase__ )
_snake_case : int = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens )
_snake_case : Any = clip_extra_context_tokens.permute(0 , 2 , 1 )
_snake_case : Union[str, Any] = self.encoder_hidden_states_proj(UpperCamelCase__ )
_snake_case : Optional[Any] = self.text_encoder_hidden_states_norm(UpperCamelCase__ )
_snake_case : Any = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a polar force of given magnitude and angle into (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of the applied forces is (approximately) zero."""
    # moment of each force about the origin: cross(location, force)
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
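
# Worked example: a force (0, 10) applied at (2, 0) has moment
# cross((2, 0), (0, 10)) = 20 N*m about the origin, so on its own it is
# rejected: in_static_equilibrium(array([[0, 10]]), array([[2, 0]])) is False.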

if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase__ : List[str] = False
lowercase__ : List[Any] = False
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
return TrainCommand(snake_case__ )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__SCREAMING_SNAKE_CASE , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__SCREAMING_SNAKE_CASE , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__SCREAMING_SNAKE_CASE , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__SCREAMING_SNAKE_CASE , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__SCREAMING_SNAKE_CASE , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__SCREAMING_SNAKE_CASE , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__SCREAMING_SNAKE_CASE , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__SCREAMING_SNAKE_CASE , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__SCREAMING_SNAKE_CASE , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__SCREAMING_SNAKE_CASE , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__SCREAMING_SNAKE_CASE , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__SCREAMING_SNAKE_CASE , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = logging.get_logger('''transformers-cli/training''' )
lowerCAmelCase = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = args.output
lowerCAmelCase = args.column_label
lowerCAmelCase = args.column_text
lowerCAmelCase = args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
lowerCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase = None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
lowerCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase = args.validation_split
lowerCAmelCase = args.train_batch_size
lowerCAmelCase = args.valid_batch_size
lowerCAmelCase = args.learning_rate
lowerCAmelCase = args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase( lowerCamelCase ):
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__a , '''embed_dim'''))
self.parent.assertTrue(hasattr(__a , '''num_heads'''))
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1e-12 , __a=True , __a=True , __a=2 , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_sizes
_UpperCamelCase = patch_stride
_UpperCamelCase = patch_padding
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = num_labels
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = num_heads
_UpperCamelCase = stride_kv
_UpperCamelCase = depth
_UpperCamelCase = cls_token
_UpperCamelCase = attention_drop_rate
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = CvtModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
_UpperCamelCase = (self.image_size, self.image_size)
_UpperCamelCase , _UpperCamelCase = image_size[0], image_size[1]
for i in range(len(self.depth)):
_UpperCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
_UpperCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase ( self , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = CvtForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = CvtModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return
@unittest.skip(reason='''Cvt does not output attentions''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 78
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Returns True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
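

# Illustrative sanity checks (not part of the original module); the expected
# values follow directly from trial division up to sqrt(number).
assert is_prime(2) and is_prime(17)
assert not is_prime(18)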
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: returns all prime numbers from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Returns a list of all prime numbers from 2 up to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Returns the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps 'quotient' an int
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
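

# Illustrative example (not part of the original module): 287 = 7 * 41,
# so its prime factorization is recovered as [7, 41].
assert prime_factorization(287) == [7, 41]
assert prime_factorization(97) == [97]  # a prime is its own factorization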
def greatest_prime_factor(number: int) -> int:
    """Returns the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Returns the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Returns True if 'number' is even, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Returns True if 'number' is odd, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach's assumption: returns two primes whose sum equals the even input."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
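

# Illustrative example (not part of the original module): the first prime pair
# the index scan above finds for 28 is 5 + 23.
assert goldbach(28) == [5, 23]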
def gcd(number_1: int, number_2: int) -> int:
    """Euclidean algorithm: returns the greatest common divisor of the two numbers."""
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1
def kg_v(number_1: int, number_2: int) -> int:
    """Returns the least common multiple (German: kgV) of the two numbers."""
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
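

# Illustrative example (not part of the original module): lcm(8, 10) == 40,
# and gcd(a, b) * kg_v(a, b) == a * b as expected.
assert kg_v(8, 10) == 40
assert gcd(8, 10) * kg_v(8, 10) == 8 * 10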
def get_prime(n: int) -> int:
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Returns all primes strictly between the two given primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Returns all divisors of n (inclusive 1 and n itself)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Returns True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduces numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Returns n! (the factorial of n)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Returns the n-th Fibonacci number (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
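

# Illustrative example (not part of the original module): this implementation
# is 1-based on both seeds, i.e. fib(0) == fib(1) == 1, fib(2) == 2, ...
assert [fib(n) for n in range(6)] == [1, 1, 2, 3, 5, 8]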
| 78
| 1
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts dataset rows, columns and batches to jax.Array objects."""
def __init__( self : Any, _UpperCAmelCase : Dict=None, _UpperCAmelCase : Dict=None, **_UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__(features=a_ )
import jax
from jaxlib.xla_client import Device
if isinstance(a_, a_ ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = device if isinstance(a_, a_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
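

# A minimal usage sketch (illustrative, not part of the original module): in
# practice this formatter is selected via `Dataset.with_format("jax")` rather
# than constructed by hand.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]  # -> jax.Array with dtype int32 (int64 when jax_enable_x64 is set)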
| 663
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 85
| 0
|
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack: fills the global table f on demand."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack: returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solves the knapsack and additionally reconstructs one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walks the dp table backwards, adding item i whenever it contributed to the optimum."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
UpperCamelCase_ = [3, 2, 4, 4]
UpperCamelCase_ = [4, 3, 2, 3]
UpperCamelCase_ = 4
UpperCamelCase_ = 6
UpperCamelCase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
UpperCamelCase_ , UpperCamelCase_ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
UpperCamelCase_ , UpperCamelCase_ = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 320
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def __lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ :Any = 'lower newer'
SCREAMING_SNAKE_CASE__ :Optional[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer.tokenize(UpperCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ :List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def __lowerCamelCase ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :int = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ :Any = 'Encode this sequence.'
SCREAMING_SNAKE_CASE__ :int = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE__ :str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE__ :List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE__ :Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )} ) # mask token has a left space
SCREAMING_SNAKE_CASE__ :Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE__ :Optional[Any] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE__ :Tuple = tokenizer.encode(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = encoded.index(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tokenizer.encode(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = encoded.index(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[str]:
pass
def __lowerCamelCase ( self : List[Any] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE__ :str = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE__ :int = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE__ :str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def __lowerCamelCase ( self : Dict ) -> List[str]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE__ :int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE__ :str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase_ )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase_ )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ :Tuple = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE__ :Any = f'''{text_of_1_token} {text_of_1_token}'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :int = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE__ :int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
SCREAMING_SNAKE_CASE__ :Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[str] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
| 320
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 547
|
def perfect(number: int) -> bool:
    """Returns True if 'number' equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__lowercase = int(input('''Enter number: ''').strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
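    # quick illustrative checks (not part of the original script):
    # 6 and 28 are the two smallest perfect numbers.
    assert perfect(6) and perfect(28) and not perfect(29)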
| 167
| 0
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decodes the ciphertext with the given key; returns None on any invalid character."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
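

# Illustrative round-trip check (hypothetical key and plaintext, not the puzzle
# input): XOR-ing with the same key twice restores the original characters,
# which is why the brute-force search below can test candidate keys directly.
_demo_key = (ord("a"), ord("b"), ord("c"))
_demo_cipher = [ord(c) ^ k for c, k in zip("the code", cycle(_demo_key))]
assert try_key(_demo_cipher, _demo_key) == "the code"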
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Tries every 3-letter lowercase key and keeps the decodings with only valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keeps only the candidate decodings that contain the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Narrows the candidate decodings by common words and sums the plaintext character codes."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }")
| 718
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
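

# A short usage sketch (illustrative; assumes the `timm` package provides the
# named backbone when a model is later built from this config):
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     config.use_timm_backbone  # -> True, consumed by TimmBackbone at model build time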
| 526
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CamemBERT model."""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
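

# A short usage sketch (illustrative): instantiating with no arguments yields
# the documented defaults shown in __init__ above.
#
#     config = CamembertConfig()
#     config.num_attention_heads  # -> 12
#     CamembertOnnxConfig(config).inputs  # -> OrderedDict for input_ids / attention_mask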
| 67
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
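

# A hedged usage sketch (the parameter tree below is a made-up stand-in for real
# GPT-J weights): every leaf whose path matches a rule above is mapped to a
# PartitionSpec, and set_partitions asserts that no leaf is left unmatched.
#
#     import numpy as np
#     params = {"transformer": {"wte": {"embedding": np.zeros((16, 8))}}}
#     spec = set_partitions(params)  # frozen dict of PartitionSpec leaves
#     # spec["transformer"]["wte"]["embedding"] == PartitionSpec("mp", None)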
| 346
| 0
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Returns the image formats that Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Converts a PIL Image to bytes, using native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encodes a PIL Image as a {"path", "bytes"} dict, preferring the path when available."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encodes a numpy array as image bytes, downcasting the dtype when needed for Pillow."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) -> List[dict]:
    """Encodes a list of paths, numpy arrays or PIL images as {"path", "bytes"} dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
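

# A minimal usage sketch (hypothetical file path, illustrative only): the Image
# feature stores either a path or raw bytes and decodes lazily to PIL on access.
#
#     from datasets import Dataset, Features
#     ds = Dataset.from_dict({"img": ["path/to/image.png"]}, features=Features({"img": Image()}))
#     ds[0]["img"]  # -> PIL.Image.Image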
| 182
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            retrieved_doc_embeds, doc_ids = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
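# Hedged usage sketch (not in the original file; the model name is
# illustrative): spin up two Ray actor workers wrapping `RayRetriever`, then
# build the distributed retriever, which registers itself on each actor:
#
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
#   retriever.init_retrieval()  # loads the index on every worker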
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase_ (PretrainedConfig ):
    """simple docstring"""
    model_type = 'wavlm'

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320,
        max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean",
        ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False,
        adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3,
        output_hidden_size=None, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
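# Illustrative instantiation (not in the original file): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples the
# waveform by 5 * 2**6 = 320 samples per frame, which is exactly what the
# `inputs_to_logits_ratio` property computes:
#
#   >>> config = UpperCAmelCase_()
#   >>> config.inputs_to_logits_ratio
#   320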
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """simple docstring"""
    features: Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
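# Hedged usage note (not in the original file; the file name is hypothetical):
# this builder reads pickled pandas DataFrames, so a round-trip looks like
#
#   >>> import pandas as pd
#   >>> pd.DataFrame({"a": [1, 2]}).to_pickle("train.pkl")
#   # then: datasets.load_dataset("pandas", data_files={"train": "train.pkl"})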
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, snake_case__, snake_case__=13, snake_case__=7, snake_case__=True, snake_case__=True, snake_case__=True, snake_case__=True, snake_case__=99, snake_case__=32, snake_case__=5, snake_case__=4, snake_case__=37, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=5_12, snake_case__=16, snake_case__=2, snake_case__=0.02, snake_case__=4, ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = parent
lowercase_ : Union[str, Any] = batch_size
lowercase_ : List[str] = seq_length
lowercase_ : List[str] = is_training
lowercase_ : Tuple = use_attention_mask
lowercase_ : List[Any] = use_token_type_ids
lowercase_ : int = use_labels
lowercase_ : Optional[Any] = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : Dict = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : List[str] = type_vocab_size
lowercase_ : str = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : List[str] = num_choices
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase_ : Dict = None
if self.use_attention_mask:
lowercase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Dict = DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=snake_case__, )
return config, input_ids, attention_mask
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
lowercase_ : str = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = config_and_inputs
lowercase_ : int = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Any = FlaxDistilBertModelTester(self )
@slow
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ : Any = model_class_name.from_pretrained("""distilbert-base-uncased""" )
lowercase_ : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
lowercase_ : Optional[int] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
lowercase_ : Optional[int] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
lowercase_ : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase_ : int = model(snake_case__, attention_mask=snake_case__ )[0]
lowercase_ : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape, snake_case__ )
lowercase_ : Any = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], snake_case__, atol=1E-4 ) )
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]

def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """simple docstring"""
    # moments: the scalar moment each force produces about its application point
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
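# Worked check (not in the original file): for 2-D vectors numpy's `cross`
# returns the scalar z-component, i.e. the moment of a force about the origin
# of its application point. A 10 N force along +y applied at x = 2 m gives a
# moment of 2 * 10 = 20 N*m:
#
#   >>> float(cross([2, 0], [0, 10]))
#   20.0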
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + """Fast""")}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("""/""")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')
            for file_name in file_names:
                if not file_name.endswith("""tokenizer.json"""):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
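# Hedged CLI example (not in the original file; the script file name is an
# assumption based on this module's purpose and argparse setup):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers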
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers(Enum):
    '''simple docstring'''
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5

@dataclass
class FlaxSchedulerOutput(BaseOutput):
    '''simple docstring'''
    prev_sample: jnp.ndarray

class FlaxSchedulerMixin:
    '''simple docstring'''
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["""dtype"""]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs)
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """simple docstring"""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)

def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """simple docstring"""
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    '''simple docstring'''
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}")
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod

def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples

def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
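# Minimal sketch (not in the original file) of the noising helper: given a
# `CommonSchedulerState` built from some scheduler via
# `CommonSchedulerState.create(scheduler)`, the forward-diffusion step is
#
#   noisy = add_noise_common(state, original_samples, noise, timesteps)
#
# which computes sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * eps,
# broadcasting the per-timestep scalars over the sample's trailing dimensions.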
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : str = logging.get_logger(__name__)
A : str = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase( PretrainedConfig ):
    model_type = """fnet"""

    def __init__(
        self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072,
        hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False, tpu_short_seq_length=512,
        pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
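# Illustrative instantiation (not in the original file): the defaults mirror
# google/fnet-base, so a scaled-down config only needs the changed fields:
#
#   >>> config = UpperCamelCase(hidden_size=256, num_hidden_layers=4)
#   >>> config.model_type
#   'fnet'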
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : int = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class UpperCamelCase( _a ):
snake_case_ : str = """blenderbot-small"""
snake_case_ : List[Any] = ["""past_key_values"""]
snake_case_ : Any = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE : Any=8 , SCREAMING_SNAKE_CASE : int=2_0_4_8 , SCREAMING_SNAKE_CASE : Any=1_6 , SCREAMING_SNAKE_CASE : Tuple=8 , SCREAMING_SNAKE_CASE : List[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE : Optional[int]=1_6 , SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE : str=0.0 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE : Dict=5_1_2 , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : Any=0.0 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Union[str, Any]=0 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : Any=2 , **SCREAMING_SNAKE_CASE : Dict , ) -> Optional[int]:
'''simple docstring'''
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = use_cache
__snake_case = encoder_layers
__snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , forced_eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
class UpperCamelCase( _a ):
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case = {0: "batch"}
__snake_case = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__snake_case = {0: "batch", 1: "decoder_sequence"}
__snake_case = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case , __snake_case = self.num_layers
for i in range(SCREAMING_SNAKE_CASE ):
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
else:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super().outputs
else:
__snake_case = super(SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
__snake_case , __snake_case = self.num_layers
for i in range(SCREAMING_SNAKE_CASE ):
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Generate decoder inputs
__snake_case = seq_length if not self.use_past else 1
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__snake_case = dict(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case , __snake_case = common_inputs["input_ids"].shape
__snake_case = common_inputs["decoder_input_ids"].shape[1]
__snake_case , __snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = decoder_seq_length + 3
__snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__snake_case = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )] , dim=1 )
__snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__snake_case , __snake_case = self.num_layers
__snake_case = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - min_num_layers
__snake_case = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
__snake_case = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case , __snake_case = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case , __snake_case = self.num_layers
__snake_case , __snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = common_inputs["attention_mask"].dtype
__snake_case = torch.cat(
[common_inputs["attention_mask"], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
__snake_case = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(SCREAMING_SNAKE_CASE )
]
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE )
__snake_case = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__snake_case = dict(tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
__snake_case = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
else:
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__snake_case = super(SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())

def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
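# Quick self-check (not in the original file): the cipher map is a bijection
# on A-Z, so deciphering an enciphered message recovers the upper-cased input,
# with non-alphabetic characters passed through untouched:
#
#   >>> cm = create_cipher_map("Goodbye!!")
#   >>> decipher(encipher("Hello World!!", cm), cm)
#   'HELLO WORLD!!'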
from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret

def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
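# Worked result (not in the original file): dfs records even subtree sizes at
# nodes 3 (size 2), 6 (size 4) and the root 1 (size 10), so cuts == [3, 6, 1]
# and the program prints len(cuts) - 1 == 2 -- removing edges (1, 3) and
# (1, 6) splits the tree into three even components of sizes 2, 4 and 4.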
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class a :
def __init__( self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=2 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> int:
_a = parent
_a = 13
_a = 7
_a = True
_a = True
_a = True
_a = True
_a = 99
_a = 32
_a = 2
_a = 4
_a = 37
_a = 'gelu'
_a = 0.1
_a = 0.1
_a = 5_12
_a = 16
_a = 2
_a = 0.0_2
_a = 3
_a = 4
_a = None
def __UpperCAmelCase ( self ) -> List[str]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__magic_name__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
_a = TFRoFormerModel(config=__magic_name__ )
_a = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_a = [input_ids, input_mask]
_a = model(__magic_name__ )
_a = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
_a = True
_a = TFRoFormerForCausalLM(config=__magic_name__ )
_a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a = model(__magic_name__ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = TFRoFormerForMaskedLM(config=__magic_name__ )
_a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
_a = self.num_labels
_a = TFRoFormerForSequenceClassification(config=__magic_name__ )
_a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = self.num_choices
_a = TFRoFormerForMultipleChoice(config=__magic_name__ )
_a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
_a = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_a = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
_a = self.num_labels
_a = TFRoFormerForTokenClassification(config=__magic_name__ )
_a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
_a = TFRoFormerForQuestionAnswering(config=__magic_name__ )
_a = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_a = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __UpperCAmelCase ( self ) -> str:
_a = TFRoFormerModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(__magic_name__ )
@require_tf
class a ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ) -> List[str]:
_a = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_a = tf.constant([[0, 1, 2, 3, 4, 5]] )
_a = model(__magic_name__ )[0]
# TODO Replace vocab size
_a = 5_00_00
_a = [1, 6, vocab_size]
self.assertEqual(output.shape , __magic_name__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_a = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1e-4 )
@require_tf
class a ( unittest.TestCase ):
_lowerCAmelCase = 1E-4
def __UpperCAmelCase ( self ) -> List[Any]:
_a = tf.constant([[4, 10]] )
_a = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_a = emba(input_ids.shape )
_a = tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(__magic_name__ , __magic_name__ , atol=self.tolerance )
def __UpperCAmelCase ( self ) -> int:
_a = tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
_a = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
_a = emba.weight[:3, :5]
tf.debugging.assert_near(__magic_name__ , __magic_name__ , atol=self.tolerance )
@require_tf
class a ( unittest.TestCase ):
_lowerCAmelCase = 1E-4
def __UpperCAmelCase ( self ) -> int:
# 2,12,16,64
_a = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_a = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_a = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_a = embed_positions([2, 16, 7_68] )[None, None, :, :]
_a , _a = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__magic_name__ , __magic_name__ , __magic_name__ )
_a = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
_a = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __magic_name__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __magic_name__ , atol=self.tolerance )
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    '''simple docstring'''
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
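# Expected behaviour (not in the original file): in the first demo graph the
# only edges whose removal disconnects it are (2, 3), (3, 4) and (2, 5); the
# triangle 0-1-2 and the cycle 5-6-7-8 contribute no bridges:
#
#   >>> sorted(compute_bridges(get_demo_graph(0)))
#   [(2, 3), (2, 5), (3, 4)]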
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float('''-inf''')
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue

def top_down_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    max_rev = [float('''-inf''') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)

def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('''-inf''')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
    return max_rev[n]

def bottom_up_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('''-inf''') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n: int, prices: list):
    '''simple docstring'''
    if n < 0:
        msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            '''Each integral piece of rod must have a corresponding price. '''
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(msg)

def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
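# A worked example with the classic CLRS price table (illustrative input, not
# part of main() above):
#
#     >>> bottom_up_cut_rod(4, [1, 5, 8, 9])
#     10
#
# The optimum cuts the length-4 rod into 2 + 2 (revenue 5 + 5 = 10), beating
# selling it whole (9) or cutting it into 1 + 3 (1 + 8 = 9).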
| 422
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in ascending order) term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
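# Why Horner's scheme: the naive sum recomputes x**i for every term, while the
# rewriting a0 + x*(a1 + x*(a2 + ...)) costs exactly one multiply and one add
# per coefficient. For the tuple above both functions return
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0.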
| 422
| 1
|
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the net present value of `cash_flows` (period 0 first) at the
    given per-period discount rate, rounded to 2 decimals."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
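# Worked example (hypothetical cash flows, not part of this module): an initial
# outlay of 1000 followed by three inflows of 500 at a 10% discount rate:
#
#     present_value(0.10, [-1000, 500, 500, 500])
#     == round(-1000 + 500 / 1.1 + 500 / 1.1**2 + 500 / 1.1**3, 2)
#     == 243.43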
| 354
|
import pytest
SCREAMING_SNAKE_CASE : Optional[Any] = "__dummy_dataset1__"
SCREAMING_SNAKE_CASE : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def UpperCamelCase_( ) -> Dict:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase_( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : List[str] = dataset_loading_script_name
_lowercase : Union[str, Any] = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=lowerCamelCase_ )
_lowercase : Optional[int] = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
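# How the fixtures compose (sketch; the test and the *_dir fixture name below
# are assumed, mirroring dataset_loading_script_name above): the last fixture
# pulls in the two constant fixtures plus pytest's built-in tmp_path, writes
# the loading script to <tmp_path>/datasets/<script_name>/<script_name>.py, and
# returns that directory as a string, so a test can simply do:
#
#     def test_builder_from_script_dir(dataset_loading_script_dir):
#         import datasets
#         builder = datasets.load_dataset_builder(dataset_loading_script_dir)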
| 354
| 1
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
"""simple docstring"""
def UpperCAmelCase ( self : Tuple , __lowercase : List[Any] ) -> Optional[int]:
__UpperCAmelCase : str = [self.constructed_objects[key_node] for key_node, _ in node.value]
__UpperCAmelCase : Optional[Any] = [tuple(__lowercase ) if isinstance(__lowercase , __lowercase ) else key for key in keys]
__UpperCAmelCase : List[Any] = Counter(__lowercase )
__UpperCAmelCase : List[str] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )
def UpperCAmelCase ( self : List[Any] , __lowercase : Any , __lowercase : str=False ) -> int:
__UpperCAmelCase : Union[str, Any] = super().construct_mapping(__lowercase , deep=__lowercase )
self._check_no_duplicates_on_constructed_node(__lowercase )
return mapping
def lowerCamelCase__ ( __lowerCamelCase : str ):
__UpperCAmelCase : Dict = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
__UpperCAmelCase : int = full_content[1:].index("""---""" ) + 1
__UpperCAmelCase : Any = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__lowerCamelCase )
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , __lowercase : Path ) -> "DatasetMetadata":
with open(__lowercase , encoding="""utf-8""" ) as readme_file:
__UpperCAmelCase , __UpperCAmelCase : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__lowercase )
else:
return cls()
def UpperCAmelCase ( self : Optional[int] , __lowercase : Path ) -> Tuple:
if path.exists():
with open(__lowercase , encoding="""utf-8""" ) as readme_file:
__UpperCAmelCase : int = readme_file.read()
else:
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[Any] = self._to_readme(__lowercase )
with open(__lowercase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(__lowercase )
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[str] = None ) -> str:
if readme_content is not None:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = _split_yaml_from_readme(__lowercase )
__UpperCAmelCase : Any = """---\n""" + self.to_yaml_string() + """---\n""" + content
else:
__UpperCAmelCase : Tuple = """---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def UpperCAmelCase ( cls : Any , __lowercase : str ) -> "DatasetMetadata":
__UpperCAmelCase : Tuple = yaml.load(__lowercase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
__UpperCAmelCase : Tuple = {
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__lowercase )
def UpperCAmelCase ( self : Any ) -> str:
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__lowercase , allow_unicode=__lowercase , encoding="""utf-8""" , ).decode("""utf-8""" )
a : Any = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a : List[Any] = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
a : Tuple = ap.parse_args()
a : List[Any] = Path(args.readme_filepath)
a : Dict = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
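# Shape of the data handled above (illustrative README, not a real dataset):
# a YAML block fenced by "---" lines at the top of the file.
#
#     readme = "---\nlicense: mit\n---\n# My dataset"
#     yaml_block, body = _split_yaml_from_readme(readme)
#     # yaml_block == "license: mit", body == "# My dataset"
#
# DatasetMetadata.from_readme() parses that block (rejecting duplicate keys via
# the custom SafeLoader) and to_readme() re-emits it ahead of the body.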
| 63
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Any = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = '''donut-swin'''
_snake_case = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowerCamelCase__=2_2_4 , lowerCamelCase__=4 , lowerCamelCase__=3 , lowerCamelCase__=9_6 , lowerCamelCase__=[2, 2, 6, 2] , lowerCamelCase__=[3, 6, 1_2, 2_4] , lowerCamelCase__=7 , lowerCamelCase__=4.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , **lowerCamelCase__ , ):
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(lowerCamelCase__ )
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
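# Sanity check on the derived attribute above: with the defaults
# (embed_dim=96 and depths=[2, 2, 6, 2], i.e. 4 stages), the channel width
# after the last Swin stage is 96 * 2 ** (4 - 1) = 768, which is what
# VisionEncoderDecoderModel reads from the config as hidden_size.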
| 212
| 0
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = XLMProphetNetTokenizer
lowerCAmelCase = False
lowerCAmelCase = True
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__A : Union[str, Any] = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = '[PAD]'
__A : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase) , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '[PAD]')
self.assertEqual(vocab_keys[1] , '[CLS]')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(_UpperCAmelCase) , 1012)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase)
__A : str = tokenizer.tokenize('This is a test')
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__A : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__A : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase)
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__A : str = tokenizer.convert_ids_to_tokens(_UpperCAmelCase)
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'Hello World!'
__A : Union[str, Any] = [3_5389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
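# Reading the offset arithmetic in the expected values above: every raw
# SentencePiece piece id is shifted by tokenizer.fairseq_offset so the special
# tokens keep the lowest ids, and pieces absent from the fixture model are
# written as -9 + offset in the expected list, which is why the digit '9'
# decodes back to '[UNK]' in the round-trip list.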
| 338
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ : str = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
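# The module above defers the heavy torch imports until an attribute is first
# accessed. A minimal stand-alone sketch of the same idea with PEP 562
# module-level __getattr__ (illustrative only; transformers' _LazyModule adds
# caching, __dir__ support, and error handling on top):
#
#     import importlib
#
#     _LAZY = {"UniSpeechModel": ".modeling_unispeech"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __name__), name)
#         raise AttributeError(name)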
| 338
| 1
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(
    lat_1: float, lon_1: float, lat_2: float, lon_2: float
) -> float:
    """Approximate the surface distance in meters between two lat/lon points
    on the Earth ellipsoid using Lambert's formula."""
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_1 = atan((1 - flattening) * tan(radians(lat_1)))
    b_lat_2 = atan((1 - flattening) * tan(radians(lat_2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat_1, lon_1, lat_2, lon_2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat_1 + b_lat_2) / 2
    q_value = (b_lat_2 - b_lat_1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
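# Consistency check that follows from the formula above (no external data
# needed): for two points on the equator both parametric latitudes are 0, so
# P = Q = 0, the X and Y corrections vanish, and the result reduces to
# EQUATORIAL_RADIUS * sigma, i.e. the haversine arc. Assuming the imported
# haversine_distance also uses the equatorial radius, one degree of longitude
# on the equator gives about 2 * pi * 6378137 / 360 ~= 111319.5 meters:
#
#     lamberts_ellipsoidal_distance(0.0, 0.0, 0.0, 1.0)  # ~111319.49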
| 420
|
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a base-10 integer to its octal (base-8) string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
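# A float-free alternative (sketch) that avoids math.pow and agrees with
# decimal_to_octal above; for example, both map 65 to "0o101":
#
#     def decimal_to_octal_divmod(num: int) -> str:
#         digits = ""
#         while num > 0:
#             num, remainder = divmod(num, 8)
#             digits = str(remainder) + digits
#         return "0o" + (digits or "0")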
| 420
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__a: List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Tuple = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: List[Any] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
__a: Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 428
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a: Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _lowerCAmelCase( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : str = PegasusTokenizer(__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCAmelCase( self ) -> List[str]:
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
return ("This is a test", "This is a test")
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[str] = '''</s>'''
lowercase__ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__lowerCAmelCase ) , 1103 )
def _lowerCAmelCase( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Any = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowercase__ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
lowercase__ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowercase__ : int = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowercase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowercase__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowercase__ : str = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[str] = ['''This is going to be way too long.''' * 150, '''short example''']
lowercase__ : Tuple = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase__ : Optional[Any] = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
lowercase__ : Dict = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def _lowerCAmelCase( self ) -> int:
# fmt: off
lowercase__ : List[Any] = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _lowerCAmelCase( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : Optional[Any] = PegasusTokenizer(__lowerCAmelCase , offset=0 , mask_token_sent=__lowerCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCAmelCase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
return ("This is a test", "This is a test")
def _lowerCAmelCase( self ) -> int:
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowercase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
lowercase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_torch
def _lowerCAmelCase( self ) -> str:
lowercase__ : List[str] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowercase__ : Dict = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase__ : Union[str, Any] = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
lowercase__ : str = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : str = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowercase__ : Dict = self._large_tokenizer(__lowerCAmelCase ).input_ids
self.assertListEqual(
__lowerCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 428
| 1
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Any:
lowercase__ : Optional[Any] = checkpoint
lowercase__ : List[str] = {}
lowercase__ : Any = vae_state_dict['''encoder.conv_in.weight''']
lowercase__ : Any = vae_state_dict['''encoder.conv_in.bias''']
lowercase__ : str = vae_state_dict['''encoder.conv_out.weight''']
lowercase__ : List[str] = vae_state_dict['''encoder.conv_out.bias''']
lowercase__ : List[str] = vae_state_dict['''encoder.norm_out.weight''']
lowercase__ : Optional[int] = vae_state_dict['''encoder.norm_out.bias''']
lowercase__ : List[str] = vae_state_dict['''decoder.conv_in.weight''']
lowercase__ : int = vae_state_dict['''decoder.conv_in.bias''']
lowercase__ : Union[str, Any] = vae_state_dict['''decoder.conv_out.weight''']
lowercase__ : Optional[int] = vae_state_dict['''decoder.conv_out.bias''']
lowercase__ : str = vae_state_dict['''decoder.norm_out.weight''']
lowercase__ : Union[str, Any] = vae_state_dict['''decoder.norm_out.bias''']
lowercase__ : Optional[int] = vae_state_dict['''quant_conv.weight''']
lowercase__ : Union[str, Any] = vae_state_dict['''quant_conv.bias''']
lowercase__ : str = vae_state_dict['''post_quant_conv.weight''']
lowercase__ : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
lowercase__ : str = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
lowercase__ : Optional[Any] = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
lowercase__ : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
lowercase__ : List[str] = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
for i in range(__lowerCamelCase ):
lowercase__ : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
lowercase__ : Any = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
lowercase__ : List[Any] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
lowercase__ : int = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : str = {'''old''': f"""down.{i}.block""", '''new''': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : Union[str, Any] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
lowercase__ : Tuple = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ : str = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
lowercase__ : Optional[Any] = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : Tuple = {'''old''': f"""mid.block_{i}""", '''new''': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : Dict = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
lowercase__ : Union[str, Any] = renew_vae_attention_paths(__lowerCamelCase )
lowercase__ : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
for i in range(__lowerCamelCase ):
lowercase__ : int = num_up_blocks - 1 - i
lowercase__ : int = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
lowercase__ : Optional[int] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
lowercase__ : List[str] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
lowercase__ : Optional[int] = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : List[str] = {'''old''': f"""up.{block_id}.block""", '''new''': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : List[str] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
lowercase__ : Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ : int = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
lowercase__ : Dict = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : str = {'''old''': f"""mid.block_{i}""", '''new''': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : Dict = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
lowercase__ : Tuple = renew_vae_attention_paths(__lowerCamelCase )
lowercase__ : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
return new_checkpoint
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , ) -> Optional[Any]:
# Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
lowercase__ : List[str] = io.BytesIO(r.content )
lowercase__ : Union[str, Any] = OmegaConf.load(__lowerCamelCase )
lowercase__ : int = 5_12
lowercase__ : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
lowercase__ : int = {}
with safe_open(__lowerCamelCase , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
lowercase__ : Optional[int] = f.get_tensor(__lowerCamelCase )
else:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )['''state_dict''']
# Convert the VAE model.
lowercase__ : Optional[int] = create_vae_diffusers_config(__lowerCamelCase , image_size=__lowerCamelCase )
lowercase__ : Tuple = custom_convert_ldm_vae_checkpoint(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = AutoencoderKL(**__lowerCamelCase )
vae.load_state_dict(__lowerCamelCase )
vae.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
lowerCAmelCase_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
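# Illustration of the key remapping performed above (the key names are
# representative of the LDM and diffusers layouts, not read from a real
# checkpoint): an LDM VAE key such as
#
#     encoder.down.0.block.1.conv1.weight
#
# is rewritten by renew_vae_resnet_paths + assign_to_checkpoint, via the
# meta_path {"old": "down.0.block", "new": "down_blocks.0.resnets"}, into
#
#     encoder.down_blocks.0.resnets.1.conv1.weight
#
# while downsample conv weights are popped and reassigned directly and the
# mid-block attention weights additionally pass through conv_attn_to_linear.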
| 560
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Any:
lowercase__ : Optional[Any] = checkpoint
lowercase__ : List[str] = {}
lowercase__ : Any = vae_state_dict['''encoder.conv_in.weight''']
lowercase__ : Any = vae_state_dict['''encoder.conv_in.bias''']
lowercase__ : str = vae_state_dict['''encoder.conv_out.weight''']
lowercase__ : List[str] = vae_state_dict['''encoder.conv_out.bias''']
lowercase__ : List[str] = vae_state_dict['''encoder.norm_out.weight''']
lowercase__ : Optional[int] = vae_state_dict['''encoder.norm_out.bias''']
lowercase__ : List[str] = vae_state_dict['''decoder.conv_in.weight''']
lowercase__ : int = vae_state_dict['''decoder.conv_in.bias''']
lowercase__ : Union[str, Any] = vae_state_dict['''decoder.conv_out.weight''']
lowercase__ : Optional[int] = vae_state_dict['''decoder.conv_out.bias''']
lowercase__ : str = vae_state_dict['''decoder.norm_out.weight''']
lowercase__ : Union[str, Any] = vae_state_dict['''decoder.norm_out.bias''']
lowercase__ : Optional[int] = vae_state_dict['''quant_conv.weight''']
lowercase__ : Union[str, Any] = vae_state_dict['''quant_conv.bias''']
lowercase__ : str = vae_state_dict['''post_quant_conv.weight''']
lowercase__ : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
lowercase__ : str = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
lowercase__ : Optional[Any] = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
lowercase__ : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
lowercase__ : List[str] = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCamelCase )
}
for i in range(__lowerCamelCase ):
lowercase__ : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
lowercase__ : Any = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
lowercase__ : List[Any] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
lowercase__ : int = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : str = {'''old''': f"""down.{i}.block""", '''new''': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : Union[str, Any] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
lowercase__ : Tuple = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ : str = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
lowercase__ : Optional[Any] = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : Tuple = {'''old''': f"""mid.block_{i}""", '''new''': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : Dict = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
lowercase__ : Union[str, Any] = renew_vae_attention_paths(__lowerCamelCase )
lowercase__ : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
for i in range(__lowerCamelCase ):
lowercase__ : int = num_up_blocks - 1 - i
lowercase__ : int = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
lowercase__ : Optional[int] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
lowercase__ : List[str] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
lowercase__ : Optional[int] = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : List[str] = {'''old''': f"""up.{block_id}.block""", '''new''': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : List[str] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
lowercase__ : Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ : int = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
lowercase__ : Dict = renew_vae_resnet_paths(__lowerCamelCase )
lowercase__ : str = {'''old''': f"""mid.block_{i}""", '''new''': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
lowercase__ : Dict = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
lowercase__ : Tuple = renew_vae_attention_paths(__lowerCamelCase )
lowercase__ : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , additional_replacements=[meta_path] , config=__lowerCamelCase )
conv_attn_to_linear(__lowerCamelCase )
return new_checkpoint
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , ) -> Optional[Any]:
# Only support V1
lowercase__ : List[Any] = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
lowercase__ : List[str] = io.BytesIO(r.content )
lowercase__ : Union[str, Any] = OmegaConf.load(__lowerCamelCase )
lowercase__ : int = 5_12
lowercase__ : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
lowercase__ : int = {}
with safe_open(__lowerCamelCase , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
lowercase__ : Optional[int] = f.get_tensor(__lowerCamelCase )
else:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )['''state_dict''']
# Convert the VAE model.
lowercase__ : Optional[int] = create_vae_diffusers_config(__lowerCamelCase , image_size=__lowerCamelCase )
lowercase__ : Tuple = custom_convert_ldm_vae_checkpoint(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = AutoencoderKL(**__lowerCamelCase )
vae.load_state_dict(__lowerCamelCase )
vae.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
lowerCAmelCase_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 560
| 1
|
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = tmp_path / '''cache'''
A : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Any = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_sql_dataset(snake_case__ , snake_case__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Tuple = tmp_path / '''cache'''
A : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A : List[Any] = features.copy() if features else default_expected_features
A : str = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
A : List[Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_sql_dataset(snake_case__ , snake_case__ )
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the given SQLite file."""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Union[str, Any] = tmp_path / '''cache'''
A : str = os.path.join(snake_case__ , '''tmp.sql''' )
A : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read()
SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
A : int = iter_sql_file(snake_case__ )
A : List[Any] = iter_sql_file(snake_case__ )
for rowa, rowa in zip(snake_case__ , snake_case__ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Union[str, Any] = tmp_path / '''cache'''
A : List[str] = os.path.join(snake_case__ , '''tmp.sql''' )
A : Any = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read()
SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
A : int = iter_sql_file(snake_case__ )
A : int = iter_sql_file(snake_case__ )
for rowa, rowa in zip(snake_case__ , snake_case__ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[Any] = tmp_path / '''cache'''
A : Dict = os.path.join(snake_case__ , '''tmp.sql''' )
A : Optional[Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read()
with pytest.raises(snake_case__ ):
SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
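# A minimal manual equivalent of the round trip tested above (sketch; assumes
# sqlalchemy is installed and the file name is illustrative):
#
#     import sqlite3
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#     ds.to_sql("dataset", "sqlite:///tmp.sql")  # writer path
#     rows = sqlite3.connect("tmp.sql").execute("SELECT * FROM dataset").fetchall()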
| 343
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase : Tuple = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
lowercase : List[str] = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
lowercase : Any = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy on the MATH dataset after canonicalizing predictions and references."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
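# Usage, as already documented in _KWARGS_DESCRIPTION above:
#
#     metric = datasets.load_metric("competition_math")
#     results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#     assert results == {"accuracy": 1.0}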
| 343
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_A : Optional[int] = _symbol_database.Default()
_A : Any = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
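# The two builder calls above materialize the message classes encoded in the
# serialized descriptor (TrainerSpec, NormalizerSpec, SelfTestData, ModelProto)
# and inject them into this module's globals.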
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
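# Illustrative usage sketch (not part of the generated file; "tokenizer.model"
# is a hypothetical local path):
#
#   m = ModelProto()
#   with open("tokenizer.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, len(m.pieces))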
| 100
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
'''simple docstring'''
    def __init__( self , parent , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 1_28
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed( self ):
        '''simple docstring'''
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        '''simple docstring'''
        model = TFTransfoXLModel(config )
        hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
        hidden_states_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        '''simple docstring'''
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
        _ , mems_1 = model(inputs ).to_tuple()
        lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''mems''': mems_1, '''labels''': lm_labels}
        lm_logits_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        '''simple docstring'''
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids_1 , input_ids_2 , lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFTransfoXLModel,
            """text-classification""": TFTransfoXLForSequenceClassification,
            """text-generation""": TFTransfoXLLMHeadModel,
            """zero-shot""": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_transfo_xl_model( self ):
        '''simple docstring'''
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )

    def test_transfo_xl_lm_head( self ):
        '''simple docstring'''
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )

    def test_transfo_xl_sequence_classification_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
    def test_dataset_conversion( self ):
'''simple docstring'''
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103( self ):
        '''simple docstring'''
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=2_00 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 100
| 1
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = "\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n"
_DESCRIPTION = "\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    \"accuracy\": Accuracy\n    \"f1\": F1 score\n    \"precision\": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0, 'f1': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'precision@10': 1.0}\n\n"
def simple_accuracy( preds, labels ):
    return float((preds == labels).mean() )

def acc_and_fa( preds, labels ):
    acc = simple_accuracy(preds, labels )
    fa = float(f1_score(y_true=labels, y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }

def precision_at_aa( en_sentvecs, in_sentvecs ):
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0 )
    sim = cdist(en_sentvecs, in_sentvecs, "cosine" )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :1_0]
    matches = np.any(preds == actual[:, None], axis=1 )
    return float(matches.mean() )
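# Toy sanity check (illustrative, mirroring the docstring examples above):
# identical English/Indic sentence vectors give perfect precision@10.
#   >>> vecs = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
#   >>> precision_at_aa(vecs, vecs)
#   1.0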
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute( self , predictions , references ):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions , references )}
elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions , references )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 703
|
"""simple docstring"""
import math
from collections.abc import Callable
def intersection( function : Callable[[float], float], x0 : float, x1 : float ) -> float:
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 1_0**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2

def f( x : float ) -> float:
    return math.pow(x, 3 ) - (2 * x) - 5

if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
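# Note (illustrative, not in the original script): the real root of
# x**3 - 2*x - 5 is approximately 2.0945515, so intersection(f, 3, 3.5)
# converges to that value via the secant iteration above.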
| 227
| 0
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
'''simple docstring'''
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config( self ):
        return TaConfig.from_pretrained('''google/umt5-base''' )

    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict

    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config( self ):
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def get_config( self ):
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )

    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )

    def create_and_check_model_fpaa_forward( self , config , input_dict , ):
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        self.model_tester = UMTaModelTester(self )

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=True , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fpaa_forward( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )

    def test_generate_with_head_masking( self ):
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def A_ ( self : Union[str, Any] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UmtaIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test( self ):
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=False , legacy=False )
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text , return_tensors='''pt''' , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 240
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 3_84
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_28
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config ,
            input_ids ,
            token_type_ids ,
            input_mask ,
            sequence_labels ,
            token_labels ,
            choice_labels ,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )

    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_saved_model_creation_extended( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , "use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , "saved_model" , "1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , "key_length" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )

            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 7_68]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 147
| 0
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
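# Make kernels fully deterministic so the audio slices asserted below are reproducible.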
enable_full_determinism()
class AudioLDMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'num_waveforms_per_prompt',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'vocoder': vocoder,
        }
        return components

    def get_dummy_inputs(self , device , seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3])
        assert np.abs(audio_slice - expected_slice).max() < 1E-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_a = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = audioldm_pipe.tokenizer(
            prompt , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
        text_inputs = text_inputs['input_ids'].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs , )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds , dim=-1)
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_b = output.audios[0]
        assert np.abs(audio_a - audio_b).max() < 1E-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_a = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
            text_inputs = text_inputs['input_ids'].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs , )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds , dim=-1)
            embeds.append(text_embeds)
        inputs['prompt_embeds'] , inputs['negative_prompt_embeds'] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_b = output.audios[0]
        assert np.abs(audio_a - audio_b).max() < 1E-2
    def test_audioldm_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'egg cracking'
        output = audioldm_pipe(**inputs , negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2])
        assert np.abs(audio_slice - expected_slice).max() < 1E-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = 'A hammer hitting a wooden surface'
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt , num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.0_1_6 , **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.0_1_6
        output = audioldm_pipe(audio_length_in_s=0.0_3_2 , **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.0_3_2
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = AudioLDMPipeline(**_A)
SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
SCREAMING_SNAKE_CASE_ = ['hey']
SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=1)
SCREAMING_SNAKE_CASE_ = output.audios.shape
assert audio_shape == (1, 256)
SCREAMING_SNAKE_CASE_ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(_A).to(_A)
SCREAMING_SNAKE_CASE_ = audioldm_pipe(_A , num_inference_steps=1)
SCREAMING_SNAKE_CASE_ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCAmelCase__ ( self):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A)
def lowerCAmelCase__ ( self):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_A)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase__ ( self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A)
@slow
class __snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , _A , _A="cpu" , _A=torch.floataa , _A=0):
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A).manual_seed(_A)
SCREAMING_SNAKE_CASE_ = np.random.RandomState(_A).standard_normal((1, 8, 128, 16))
SCREAMING_SNAKE_CASE_ = torch.from_numpy(_A).to(device=_A , dtype=_A)
SCREAMING_SNAKE_CASE_ = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
SCREAMING_SNAKE_CASE_ = self.get_inputs(_A)
SCREAMING_SNAKE_CASE_ = 25
SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A).audios[0]
assert audio.ndim == 1
assert len(_A) == 81920
SCREAMING_SNAKE_CASE_ = audio[77230:77240]
SCREAMING_SNAKE_CASE_ = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5])
SCREAMING_SNAKE_CASE_ = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1E-2
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
SCREAMING_SNAKE_CASE_ = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
SCREAMING_SNAKE_CASE_ = self.get_inputs(_A)
SCREAMING_SNAKE_CASE_ = audioldm_pipe(**_A).audios[0]
assert audio.ndim == 1
assert len(_A) == 81920
SCREAMING_SNAKE_CASE_ = audio[27780:27790]
SCREAMING_SNAKE_CASE_ = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2])
SCREAMING_SNAKE_CASE_ = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3E-2
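# Usage sketch of the pipeline exercised by the tests above (hedged: the prompt,
# step count, and output path are arbitrary examples; assumes AudioLDMPipeline is
# imported at the top of this module, as the tests above rely on, and that scipy
# is available for writing the waveform):
if __name__ == "__main__":
    import scipy.io.wavfile
    demo_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    demo_audio = demo_pipe(
        "A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.12
    ).audios[0]
    scipy.io.wavfile.write("hammer.wav", rate=demo_pipe.vocoder.config.sampling_rate, data=demo_audio)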
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
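# Minimal usage sketch of the config class restored above (the projection_dim
# value is an arbitrary example):
if __name__ == "__main__":
    demo_config = DPRConfig(projection_dim=128)
    print(demo_config.model_type, demo_config.hidden_size, demo_config.projection_dim)  # dpr 768 128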
| 620
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
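# Standalone illustration of the lazy-import idea used above (a toy based on
# module-level __getattr__, PEP 562 -- not the transformers _LazyModule itself):
# attribute access triggers the real import only on first touch.
if __name__ == "__main__":
    import importlib
    import types
    lazy_demo = types.ModuleType("lazy_demo")
    lazy_demo.__getattr__ = lambda name: getattr(importlib.import_module("json"), name)
    print(lazy_demo.dumps({"lazy": True}))  # json is only imported when .dumps is accessed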
| 564
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the keyword expected by the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Return the text content of the files inside the downloaded artifact zips."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
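# Usage sketch (the artifact name and token placeholder below are hypothetical;
# a real call needs a GitHub token with Actions read access):
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["prev_ci_results"], output_dir=".", token="<your-github-token>")
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))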
| 564
| 1
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    '''simple docstring'''
    def __init__( self , question_encoder , generator ):
        """simple docstring"""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        """simple docstring"""
        if os.path.isfile(save_directory ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , "question_encoder_tokenizer" )
        generator_path = os.path.join(save_directory , "generator_tokenizer" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("config" , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="generator_tokenizer" )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        """simple docstring"""
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
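# Usage sketch (hedged: assumes a hub checkpoint laid out with the two tokenizer
# subfolders used above, e.g. facebook/rag-token-nq):
if __name__ == "__main__":
    demo_tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    demo_batch = demo_tok(["who holds the record in 100m freestyle"], return_tensors="pt")
    print(demo_batch["input_ids"].shape)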
| 391
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase ( unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self , batch_size , length ):
        """simple docstring"""
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = 20
UpperCAmelCase = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
UpperCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase = jax.nn.softmax(_snake_case , axis=-1 )
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
UpperCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = 10
UpperCAmelCase = 2
# create ramp distribution
UpperCAmelCase = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase = 5
UpperCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = 10
UpperCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
UpperCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = 20
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
UpperCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase = 5
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = 15
UpperCAmelCase = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = 20
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase = 1
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase = 3
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = 20
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = 5
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase = 4
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase = 3
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 10
UpperCAmelCase = 15
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 15
# dummy input_ids and scores
UpperCAmelCase = ids_tensor((batch_size, sequence_length) , _snake_case )
UpperCAmelCase = input_ids.copy()
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = scores.copy()
# instantiate all dist processors
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
UpperCAmelCase = 10
# no processor list
UpperCAmelCase = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 10
UpperCAmelCase = 15
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 15
# dummy input_ids and scores
UpperCAmelCase = ids_tensor((batch_size, sequence_length) , _snake_case )
UpperCAmelCase = input_ids.copy()
UpperCAmelCase = self._get_uniform_logits(_snake_case , _snake_case )
UpperCAmelCase = scores.copy()
# instantiate all dist processors
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
UpperCAmelCase = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
UpperCAmelCase = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
UpperCAmelCase = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
UpperCAmelCase = jax.jit(_snake_case )
UpperCAmelCase = jax.jit(_snake_case )
UpperCAmelCase = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
UpperCAmelCase = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
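# Self-contained demo of composing the warpers tested above into a single
# FlaxLogitsProcessorList (shapes and values are arbitrary examples; requires
# flax to be installed so the imports at the top of this module succeed):
if __name__ == "__main__":
    demo_input_ids = ids_tensor((2, 5), vocab_size=20)
    demo_scores = jnp.ones((2, 20)) / 20
    demo_chain = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(3)])
    print(demo_chain(demo_input_ids, demo_scores, cur_len=5).shape)  # (2, 20)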
| 391
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self, new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self, src_texts, src_lang = "en_XX", tgt_texts = None, tgt_lang = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self, src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def set_tgt_lang_special_tokens( self, lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
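# Usage sketch (hedged: mirrors the src_lang flow implemented above; assumes the
# facebook/mbart-large-en-ro checkpoint referenced in the vocab maps):
if __name__ == "__main__":
    demo_tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
    demo_tok.src_lang = "en_XX"
    demo_batch = demo_tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # the en_XX language code id is appended after </s> (see set_src_lang_special_tokens)
    print(demo_batch["input_ids"][0, -1].item() == demo_tok.convert_tokens_to_ids("en_XX"))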
| 40
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
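# Worked example of the checkpoint regex above: given the made-up docstring
#     "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
# `_re_checkpoint.findall(...)` yields [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")],
# and since the link equals f"https://huggingface.co/{ckpt_name}", the config passes the check.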
| 581
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 523
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
if __name__ == "__main__":
    main()
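# Illustrative (hypothetical) record and the lines it produces: for
#   {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}
# the script writes "who wrote hamlet" to the evaluation set file and the
# tab-separated line "Hamlet\tWilliam Shakespeare" to the gold data file.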
| 523
| 1
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ) -> None:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        return len(self.vocab )
    def get_vocab( self ):
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        return (vocab_file,)
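# Usage sketch (hedged: the hub id mirrors the vocab map above; the tokenizer is
# purely character-level, so every input character becomes one token):
if __name__ == "__main__":
    demo_tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    print(demo_tok._tokenize("ab1"))  # -> ['a', 'b', '1']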
| 121
|
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__) , data_file ) ) ):
        a, x = list(map(int , line.split(''',''') ) )
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
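# Worked example of the comparison trick above: rather than computing a**x directly,
# compare x * log10(a). For the lines "2,11" and "3,7":
#   11 * log10(2) ≈ 3.311  vs  7 * log10(3) ≈ 3.340,
# so 3**7 (= 2187) beats 2**11 (= 2048) and the function reports the 1-based line
# number of "3,7".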
| 121
| 1
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _SCREAMING_SNAKE_CASE( snake_case_ : Tuple , snake_case_ : List[Any] ) ->int:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _SCREAMING_SNAKE_CASE( snake_case_ : Dict , snake_case_ : int , snake_case_ : List[str] ) ->Tuple:
'''simple docstring'''
_lowercase : List[Any] = tmp_path / '''cache'''
_lowercase : Dict = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : int = TextDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_text_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def _SCREAMING_SNAKE_CASE( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : str ) ->Tuple:
'''simple docstring'''
_lowercase : int = tmp_path / '''cache'''
_lowercase : Any = {'''text''': '''string'''}
_lowercase : str = features.copy() if features else default_expected_features
_lowercase : List[Any] = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Union[str, Any] = TextDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_text_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _SCREAMING_SNAKE_CASE( snake_case_ : Any , snake_case_ : Any , snake_case_ : int ) ->Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = tmp_path / '''cache'''
_lowercase : Dict = {'''text''': '''string'''}
_lowercase : Optional[Any] = TextDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_text_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _SCREAMING_SNAKE_CASE( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
if issubclass(snake_case_ , snake_case_ ):
_lowercase : Optional[int] = text_path
elif issubclass(snake_case_ , snake_case_ ):
_lowercase : Tuple = [text_path]
_lowercase : Any = tmp_path / '''cache'''
_lowercase : Any = {'''text''': '''string'''}
_lowercase : Dict = TextDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_text_dataset(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Dict=("train",) ) ->Tuple:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
_lowercase : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _SCREAMING_SNAKE_CASE( snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Any ) ->Optional[int]:
'''simple docstring'''
_lowercase : int = tmp_path / '''cache'''
_lowercase : Tuple = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Union[str, Any] = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_text_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def _SCREAMING_SNAKE_CASE( snake_case_ : Any , snake_case_ : int , snake_case_ : Dict ) ->Union[str, Any]:
'''simple docstring'''
_lowercase : int = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_lowercase : Dict = {'''text''': '''string'''}
_lowercase : Any = features.copy() if features else default_expected_features
_lowercase : Union[str, Any] = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Any = TextDatasetReader({'''train''': text_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_text_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _SCREAMING_SNAKE_CASE( snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[Any] ) ->int:
'''simple docstring'''
if split:
_lowercase : Any = {split: text_path}
else:
_lowercase : int = '''train'''
_lowercase : Any = {'''train''': text_path, '''test''': text_path}
_lowercase : Optional[Any] = tmp_path / '''cache'''
_lowercase : Optional[Any] = {'''text''': '''string'''}
_lowercase : Union[str, Any] = TextDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_text_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
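# Usage sketch of the reader under test (hedged: a minimal standalone run using
# the same public API the fixtures above exercise):
if __name__ == "__main__":
    import pathlib
    import tempfile
    tmp = pathlib.Path(tempfile.mkdtemp())
    text_file = tmp / "demo.txt"
    text_file.write_text("line one\nline two\n")
    ds = TextDatasetReader(str(text_file), cache_dir=str(tmp / "cache")).read()
    print(ds.num_rows, ds.column_names)  # 2 ['text']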
| 708
|
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'align_text_model'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'align_vision_model'
    def __init__( self , num_channels: int = 3 , image_size: int = 600 , width_coefficient: float = 2.0 , depth_coefficient: float = 3.1 , depth_divisor: int = 8 , kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding: List[int] = [] , strides: List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio: float = 0.25 , hidden_act: str = "swish" , hidden_dim: int = 2560 , pooling_type: str = "mean" , initializer_range: float = 0.02 , batch_norm_eps: float = 0.001 , batch_norm_momentum: float = 0.99 , drop_connect_rate: float = 0.2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'align'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config: AlignTextConfig , vision_config: AlignVisionConfig , **kwargs ) -> "AlignConfig":
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
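# Usage sketch: composing a full AlignConfig from default sub-configs via the
# classmethod defined above:
if __name__ == "__main__":
    demo_config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
    print(demo_config.model_type, demo_config.projection_dim)  # align 640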
| 411
| 0
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    """Read a big-endian uint32 from a bytestream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """simple docstring"""
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        '''simple docstring'''
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
@property
def _a (self ):
'''simple docstring'''
return self._images
@property
def _a (self ):
'''simple docstring'''
return self._labels
@property
def _a (self ):
'''simple docstring'''
return self._num_examples
@property
def _a (self ):
'''simple docstring'''
return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
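# Behaviour sketch for next_batch above (hypothetical numbers): with 10 examples and
# batch_size=4, the third call crosses the epoch boundary, so it returns the last 2
# examples of the current epoch concatenated with the first 2 of the reshuffled next
# epoch, and _epochs_completed is incremented once.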
@deprecated(UpperCAmelCase__ , "Please write your own downloading logic." )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if not gfile.Exists(UpperCAmelCase__ ):
gfile.MakeDirs(UpperCAmelCase__ )
lowerCamelCase = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
if not gfile.Exists(UpperCAmelCase__ ):
urllib.request.urlretrieve(UpperCAmelCase__ , UpperCAmelCase__ ) # noqa: S310
with gfile.GFile(UpperCAmelCase__ ) as f:
lowerCamelCase = f.size()
print("Successfully downloaded" , UpperCAmelCase__ , UpperCAmelCase__ , "bytes." )
return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    """simple docstring"""
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
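# Typical usage of read_data_sets (a sketch; the directory is a placeholder, and the
# gzipped IDX files are fetched from DEFAULT_SOURCE_URL on first use):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(100)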
| 623
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
])
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
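    # Note: the "mps" branch above exists because seeding a torch.Generator placed on
    # the MPS backend is not reliably supported, so the test falls back to the global
    # torch.manual_seed on Apple silicon.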
    def test_audioldm_ddim(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        '''simple docstring'''
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        '''simple docstring'''
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        '''simple docstring'''
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 623
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 721
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
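# Quick usage sketch (expected digits assume the Decimal context rounding configured
# above; the [:-1] in pi() drops the last, possibly mis-rounded, digit):
#   pi(30) -> "3.1415926535897932384626433832"
# Each Chudnovsky term contributes roughly 14 digits, hence ceil(precision / 14) iterations.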
if __name__ == "__main__":
_lowerCAmelCase : Tuple = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 386
| 0
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
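# Rationale for the helper above: features that embed large binary payloads (images,
# audio, raw bytes) get a smaller Parquet row-group size so a single row group can be
# read back without exhausting memory; purely scalar features fall through to the
# writer's default batch size (the None return).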
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet to a binary file handle, batch by batch."""
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 521
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def get_artifacts_links(worflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip into `output_dir`."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a single downloaded artifact (zip file)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            ' problem.')
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact zips found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error across all logs."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count errors per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ =get_job_links(args.workflow_run_id, token=args.token)
lowercase__ ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ =k.find(' / ')
lowercase__ =k[index + len(' / ') :]
lowercase__ =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ =reduce_by_error(errors)
lowercase__ =reduce_by_model(errors)
lowercase__ =make_github_table(reduced_by_error)
lowercase__ =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 521
| 1
|
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    """simple docstring"""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 410
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV, aliased to match the `cva.` call sites below
LABEL_DIR = ''
IMAGE_DIR = ''
OUTPUT_DIR = ''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cva.imwrite(f'/{file_root}.jpg', image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f'Success {index+1}/{len(new_images)} with {file_name}')
        annos_list = []
        for anno in new_annos[index]:
            obj = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj)
        with open(f'/{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """simple docstring"""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
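# Note on the annotation format handled above: each bbox is a YOLO-style row,
# [class_id, x_center, y_center, width, height], with coordinates normalised to [0, 1].
# That is why a horizontal flip only needs x_center -> 1 - x_center and a vertical
# flip only needs y_center -> 1 - y_center; widths and heights are unchanged.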
def random_chars(number_char: int = 32) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 410
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Tests a single `complete` example against all of the implemented `by_feature` scripts."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()", ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '1'})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps epoch\n    --output_dir {self.tmpdir}\n    '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))
    def test_checkpointing_by_steps(self):
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps 1\n    --output_dir {self.tmpdir}\n    '.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))
    def test_load_states_by_epoch(self):
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n    '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)
    def test_load_states_by_steps(self):
        testargs = f'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n    '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
    @slow
    def test_cross_validation(self):
        testargs = '\n    examples/by_feature/cross_validation.py\n    --num_folds 2\n    '.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)
    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)
    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n    examples/by_feature/tracking.py\n    --with_tracking\n    --project_dir {tmpdir}\n    '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))
    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)
    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs)
| 80
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    '''simple docstring'''
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    '''simple docstring'''
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    '''simple docstring'''
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
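# Why the two-pointer scan in triplet_sum2 is sound: the array is sorted, so when the
# current sum is below the target only moving `left` rightwards can raise it, and when
# it is above the target only moving `right` leftwards can lower it. Each (i, left,
# right) combination is visited at most once, giving O(n^2) overall versus the O(n^3)
# permutation search in triplet_sum1.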
def solution_times() -> tuple[float, float]:
    '''simple docstring'''
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 80
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
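# Editor's note: a minimal sketch (editor-written, NOT Accelerate's actual
# implementation) of the retry-on-OOM idea behind `find_executable_batch_size`:
# call the wrapped function, and halve the batch size whenever an
# out-of-memory style error escapes it.
def _find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        def wrapper():
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size)
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise  # only retry on OOM-style failures
                    batch_size //= 2  # halve and retry
            raise RuntimeError("No executable batch size found.")
        return wrapper
    return decorator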
| 701
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_00

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first number with more than number_unique_partitions prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'{solution() = }')
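# Editor's worked example (not from the original file): the prime partitions of
# 7 are 7, 5 + 2 and 3 + 2 + 2. Because prime factorisations are unique, the
# product of each multiset of primes encodes that partition, so partition(7)
# yields the three products {7, 10, 12}.
assert partition(7) == {7, 10, 12}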
| 365
| 0
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase : str = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
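# Editor's note: a hedged usage sketch, not part of the original script, and the
# dump-folder path is a placeholder. After conversion, the folder loads like any
# local Hugging Face checkpoint:
#
#   from transformers import XLMTokenizer, XLMWithLMHeadModel
#   model = XLMWithLMHeadModel.from_pretrained("./xlm-dump")      # placeholder path
#   tokenizer = XLMTokenizer.from_pretrained("./xlm-dump")        # placeholder path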
| 567
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens])

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
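# Editor's note: a hedged usage sketch, not part of the original module. The
# checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above; the export name
# MBartTokenizer is assumed.
#
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # With the source-language special tokens set above, the encoded sequence has
#   # no prefix and is suffixed with </s> followed by the en_XX language code.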
| 567
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions and return the sum in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
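# Editor's check (not from the original file): 1/2 + 1/3 + 1/6 = 36/36, which
# reduces to 1/1 after dividing by the gcd.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)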
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
for num, den in unique_s:
        total += Fraction(num, den)
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 201
|
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
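# Editor's worked example (the name `shear_stress` is the editor's choice; the
# parameter names are taken from the function body). Passing area=0 asks the
# solver for the missing area: 100 N / 25 Pa = 4 m^2.
assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)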
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
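# Editor's note: a hedged usage sketch, not part of the original module, and it
# assumes the usual transformers exports (IBertConfig, IBertModel).
# `quant_mode=True` turns on I-BERT's integer-only arithmetic; `force_dequant`
# selectively falls back to floating point for the named ops.
#
#   from transformers import IBertConfig, IBertModel
#   config = IBertConfig(quant_mode=True)
#   model = IBertModel(config)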
| 328
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
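# Editor's note: a hedged usage sketch, not part of the original module (the
# "gpt2" checkpoint name is taken from the maps above).
#
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)
#   # Without add_prefix_space=True, the assertions in _encode_plus /
#   # _batch_encode_plus above fire for pretokenized inputs.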
| 510
| 0
|
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
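# Editor's worked example (not from the original file): 3**10 = 59049, and
# 59049 % 1000 == 49, computed in O(log n) multiplications.
assert binary_exponentiation(3, 10, 1000) == 49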
# a prime number
p = 7_0_1

a = 1_000_000_000

b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 65
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
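# Editor's note: a hedged usage sketch, not part of the original module; the
# checkpoint name "Salesforce/blip-image-captioning-base" is an assumption.
#
#   from PIL import Image
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#   # returns pixel_values plus the tokenized text (input_ids, attention_mask)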
| 428
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels)
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 428
| 1
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 718
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
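# Editor's note: a hedged usage sketch, not part of the original module; it
# assumes this class ships as ConvNextImageProcessor and that PIL plus a local
# sample image are available.
#
#   from PIL import Image
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # (1, 3, 224, 224): resize, center-crop, rescale, normalize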
| 103
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case="</s>" ,_snake_case="</s>" ,_snake_case="<s>" ,_snake_case="<unk>" ,_snake_case="<pad>" ,_snake_case="<mask>" ,_snake_case = None ,**_snake_case ,):
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ : Optional[Any] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token
UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ : Optional[Any] = kwargs.get("additional_special_tokens" ,[] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_snake_case ,tgt_lang=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,mask_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
UpperCAmelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
UpperCAmelCase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_ : Optional[int] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Union[str, Any] = len(self.sp_model )
UpperCAmelCase_ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case )
}
UpperCAmelCase_ : List[Any] = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ : Optional[int] = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : List[Any] = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase__ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase__ ( self ):
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
UpperCAmelCase_ : List[Any] = self.__dict__.copy()
UpperCAmelCase_ : List[str] = None
return state
def __setstate__( self ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self ,_snake_case ):
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : Dict = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self ,_snake_case ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : int = ""
UpperCAmelCase_ : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
UpperCAmelCase_ : int = True
UpperCAmelCase_ : List[Any] = []
else:
current_sub_tokens.append(token )
UpperCAmelCase_ : Any = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
if not os.path.isdir(_snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Optional[Any] = os.path.join(
_snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,"wb" ) as fi:
UpperCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
UpperCAmelCase_ : Union[str, Any] = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_snake_case )) + suffix_ones
return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ : str = src_lang
UpperCAmelCase_ : Dict = self(_snake_case ,add_special_tokens=_snake_case ,return_tensors=_snake_case ,**_snake_case )
UpperCAmelCase_ : List[str] = self.convert_tokens_to_ids(_snake_case )
UpperCAmelCase_ : Optional[Any] = tgt_lang_id
return inputs
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = "en_XX" ,_snake_case = None ,_snake_case = "ro_RO" ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = src_lang
UpperCAmelCase_ : int = tgt_lang
return super().prepare_seqaseq_batch(_snake_case ,_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = self.lang_code_to_id[src_lang]
UpperCAmelCase_ : List[str] = [self.cur_lang_code_id]
UpperCAmelCase_ : Dict = [self.eos_token_id]
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : int = self.lang_code_to_id[tgt_lang]
UpperCAmelCase_ : Optional[Any] = [self.cur_lang_code_id]
UpperCAmelCase_ : int = [self.eos_token_id]
| 71
|
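The tokenizer above mimics fairseq's vocabulary by pinning four special tokens and shifting every SentencePiece id by a fixed offset. A minimal sketch of that id mapping (all names hypothetical; the plain dict stands in for a real SentencePiece model):

FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # the first "real" spm token shifts from spm id 3 to fairseq id 4

def token_to_id(token, spm_piece_to_id, unk_id=3):
    if token in FAIRSEQ_SPECIALS:
        return FAIRSEQ_SPECIALS[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm reserves id 0 for its own <unk>, so fall back to the fairseq unk id
    return spm_id + FAIRSEQ_OFFSET if spm_id else unk_id

print(token_to_id(",", {",": 3}))  # -> 4, matching the alignment table above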
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowerCamelCase_ : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
lowerCamelCase_ : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def UpperCAmelCase__ (self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
lowerCamelCase_ : Tuple = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase__ (self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
lowerCamelCase_ : List[str] = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase__ (self ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
lowerCamelCase_ : Tuple = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
__lowercase : Dict = Accelerator()
__lowercase : Optional[Any] = (accelerator.state.process_index + 2, 10)
__lowercase : int = torch.randint(0, 10, shape).to(accelerator.device)
__lowercase : str = ''''''
__lowercase : Tuple = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__lowercase : Tuple = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__lowercase : Union[str, Any] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 422
| 0
|
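The test above exercises `accelerator.pad_across_processes`, which pads each process's tensor along dim 0 to the largest size, either at the end or (with `pad_first=True`) at the front. A single-process torch emulation of that behaviour (`pad_to_max` is a hypothetical helper, not the accelerate API):

import torch

def pad_to_max(tensor, max_dim0, pad_first=False, value=0):
    pad_len = max_dim0 - tensor.shape[0]
    pad = torch.full((pad_len, *tensor.shape[1:]), value, dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

t = torch.randint(0, 10, (3, 10))
padded = pad_to_max(t, 5)
assert padded.shape[0] == 5 and torch.all(padded[3:] == 0)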
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A_ ( __lowercase , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[Any] = BarthezTokenizer
_SCREAMING_SNAKE_CASE : int = BarthezTokenizerFast
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Optional[Any] = True
def snake_case__ ( self) -> Any:
"""simple docstring"""
super().setUp()
_UpperCAmelCase : Any = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''')
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_A)
_UpperCAmelCase : int = tokenizer
def snake_case__ ( self) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = '''<pad>'''
_UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A)
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-1] , '''<mask>''')
self.assertEqual(len(_A) , 101122)
def snake_case__ ( self) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101122)
@require_torch
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_UpperCAmelCase : Optional[Any] = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase : List[str] = self.tokenizer(
_A , max_length=len(_A) , padding=_A , truncation=_A , return_tensors='''pt''')
self.assertIsInstance(_A , _A)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
_UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A)
def snake_case__ ( self) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : str = '''I was born in 92000, and this is falsé.'''
_UpperCAmelCase : List[str] = tokenizer.tokenize(_A)
_UpperCAmelCase : Any = rust_tokenizer.tokenize(_A)
self.assertListEqual(_A , _A)
_UpperCAmelCase : Any = tokenizer.encode(_A , add_special_tokens=_A)
_UpperCAmelCase : Tuple = rust_tokenizer.encode(_A , add_special_tokens=_A)
self.assertListEqual(_A , _A)
_UpperCAmelCase : List[Any] = self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] = tokenizer.encode(_A)
_UpperCAmelCase : List[Any] = rust_tokenizer.encode(_A)
self.assertListEqual(_A , _A)
@slow
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase : List[str] = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_A , )
| 186
|
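The slow/fast comparison above generalises to any checkpoint that ships both tokenizer implementations. A hedged sketch of the parity check, assuming network access:

from transformers import AutoTokenizer

def check_parity(checkpoint, text):
    slow = AutoTokenizer.from_pretrained(checkpoint, use_fast=False)
    fast = AutoTokenizer.from_pretrained(checkpoint, use_fast=True)
    assert slow.tokenize(text) == fast.tokenize(text)
    assert slow.encode(text) == fast.encode(text)

check_parity("moussaKam/mbarthez", "I was born in 92000, and this is falsé.")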
import heapq
def _lowerCamelCase ( __A : dict ) -> set[int]:
_UpperCAmelCase : list[list] = []
# for each node and its adjacency list, add them and the node's rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-heap, so -1 * len(v) is used to simulate a max-heap
for key, value in graph.items():
# O(log(n))
heapq.heappush(__A , [-1 * len(__A ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCAmelCase : Union[str, Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCAmelCase : Tuple = heapq.heappop(__A )[1][0]
chosen_vertices.add(__A )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_UpperCAmelCase : Dict = elem[1][1].index(__A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 186
| 1
|
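Since the identifiers above are obfuscated, here is a de-obfuscated reading of the same heap-based greedy algorithm: repeatedly pop the highest-degree vertex, add it to the cover, and decrement its neighbours' degrees. A sketch, not the original file:

import heapq

def greedy_min_vertex_cover(graph):
    # heapq is a min-heap, hence the negated degree for max-degree-first order
    queue = [[-len(adj), (node, adj)] for node, adj in graph.items()]
    heapq.heapify(queue)
    chosen_vertices = set()
    while queue and queue[0][0] != 0:
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        for entry in queue:
            if entry[0] != 0 and argmax in entry[1][1]:
                entry[1][1].remove(argmax)
                entry[0] += 1
        heapq.heapify(queue)
    return chosen_vertices

print(greedy_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}))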
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
SCREAMING_SNAKE_CASE__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
SCREAMING_SNAKE_CASE__ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
SCREAMING_SNAKE_CASE__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
SCREAMING_SNAKE_CASE__ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
SCREAMING_SNAKE_CASE__ = 'allenai'
def lowercase__ ( __UpperCamelCase )-> Any:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
UpperCamelCase = dict((re.sub(R"""@@$""" , """""" , __UpperCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , __UpperCamelCase ), v) for k, v in d.items() )
UpperCamelCase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F"{k}</w>"]
UpperCamelCase = d[k] # restore
return da
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
# prep
assert os.path.exists(__UpperCamelCase )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
print(F"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
UpperCamelCase = basename(__UpperCamelCase )
UpperCamelCase = dirname(__UpperCamelCase )
UpperCamelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
UpperCamelCase = cls.hub_models()
UpperCamelCase = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
UpperCamelCase = """."""
# note: this model dump is old; fairseq has since upgraded its model format
# and performs a whole lot of rewrites and splits on the saved weights,
# therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"using checkpoint {checkpoint_file}" )
UpperCamelCase = hub_utils.from_pretrained(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , archive_map=__UpperCamelCase , **__UpperCamelCase )
UpperCamelCase = vars(chkpt["""args"""]["""model"""] )
UpperCamelCase = args["""source_lang"""]
UpperCamelCase = args["""target_lang"""]
UpperCamelCase = dirname(__UpperCamelCase )
UpperCamelCase = basename(__UpperCamelCase )
# dicts
UpperCamelCase = os.path.join(__UpperCamelCase , F"dict.{src_lang}.txt" )
UpperCamelCase = os.path.join(__UpperCamelCase , F"dict.{tgt_lang}.txt" )
UpperCamelCase = Dictionary.load(__UpperCamelCase )
UpperCamelCase = rewrite_dict_keys(src_dict.indices )
UpperCamelCase = len(__UpperCamelCase )
UpperCamelCase = os.path.join(__UpperCamelCase , """vocab-src.json""" )
print(F"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCamelCase , ensure_ascii=__UpperCamelCase , indent=__UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
UpperCamelCase = True
for k in src_vocab.keys():
if not k.islower():
UpperCamelCase = False
break
UpperCamelCase = Dictionary.load(__UpperCamelCase )
UpperCamelCase = rewrite_dict_keys(tgt_dict.indices )
UpperCamelCase = len(__UpperCamelCase )
UpperCamelCase = os.path.join(__UpperCamelCase , """vocab-tgt.json""" )
print(F"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCamelCase , ensure_ascii=__UpperCamelCase , indent=__UpperCamelCase ) )
# merges_file (bpecodes)
UpperCamelCase = os.path.join(__UpperCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
UpperCamelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
break
with open(__UpperCamelCase , encoding="""utf-8""" ) as fin:
UpperCamelCase = fin.read()
UpperCamelCase = re.sub(R""" \d+$""" , """""" , __UpperCamelCase , 0 , re.M ) # remove frequency number
print(F"Generating {merges_file}" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as fout:
fout.write(__UpperCamelCase )
# model config
UpperCamelCase = os.path.join(__UpperCamelCase , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", F"need to extend tokenizer to support bpe={args['tokenizer']}"
UpperCamelCase = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
UpperCamelCase = 5
UpperCamelCase = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
UpperCamelCase = best_score_hparams[model_dir]["""length_penalty"""]
else:
UpperCamelCase = 1.0
print(F"Generating {fsmt_model_config_file}" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCamelCase , ensure_ascii=__UpperCamelCase , indent=__UpperCamelCase ) )
# tokenizer config
UpperCamelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(F"Generating {fsmt_tokenizer_config_file}" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCamelCase , ensure_ascii=__UpperCamelCase , indent=__UpperCamelCase ) )
# model
UpperCamelCase = chkpt["""models"""][0]
UpperCamelCase = model.state_dict()
# rename keys to start with 'model.'
UpperCamelCase = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
UpperCamelCase = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase = FSMTConfig.from_pretrained(__UpperCamelCase )
UpperCamelCase = FSMTForConditionalGeneration(__UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
# save
UpperCamelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
print(F"Generating {pytorch_weights_dump_path}" )
torch.save(__UpperCamelCase , __UpperCamelCase )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F"cd {data_root}" )
print(F"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 301
|
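The `rewrite_dict_keys` helper above converts a fairseq BPE vocabulary to the format the FSMT tokenizer expects: the `@@` continuation marker is stripped and word-final tokens get a `</w>` suffix, while the four special tokens keep their original spelling. A standalone sketch of that transformation (a simplified reading, not the exact function):

import re

def rewrite_bpe_keys(d):
    out = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v for k, v in d.items()
    }
    # special tokens keep their original spelling
    for k in "<s> <pad> </s> <unk>".split():
        if f"{k}</w>" in out:
            del out[f"{k}</w>"]
            out[k] = d[k]
    return out

print(rewrite_bpe_keys({"le@@": 5, "tt@@": 6, "er": 7}))  # {'le': 5, 'tt': 6, 'er</w>': 7}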
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( lowerCamelCase ):
lowercase = ["""image_processor""", """tokenizer"""]
lowercase = """BlipImageProcessor"""
lowercase = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = False
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
UpperCamelCase = self.tokenizer
UpperCamelCase = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
return text_encoding
# add pixel_values
UpperCamelCase = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
if text is not None:
UpperCamelCase = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(_SCREAMING_SNAKE_CASE )
return encoding_image_processor
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 301
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = """Salesforce/blip-image-captioning-base"""
a__ : str = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : Tuple = """image_captioner"""
a__ : Dict = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : List[str] = ["""text"""]
def __init__( self , *__lowercase , **__lowercase) -> Optional[Any]:
requires_backends(self , ['''vision'''])
super().__init__(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> Dict:
return self.pre_processor(images=__lowercase , return_tensors='''pt''')
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
return self.model.generate(**__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> Optional[int]:
return self.pre_processor.batch_decode(__lowercase , skip_special_tokens=__lowercase)[0].strip()
| 452
|
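The tool above wraps BLIP captioning; the same checkpoint is also reachable through the transformers pipeline API. A sketch assuming network access and an illustrative image path:

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("cat.png")[0]["generated_text"])  # "cat.png" is a placeholder path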
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained('''google/mt5-small''')
__UpperCamelCase :Optional[Any] = tokenizer('''Hello there''' , return_tensors='''np''').input_ids
__UpperCamelCase :List[str] = tokenizer('''Hi I am''' , return_tensors='''np''').input_ids
__UpperCamelCase :Optional[int] = shift_tokens_right(__lowercase , model.config.pad_token_id , model.config.decoder_start_token_id)
__UpperCamelCase :Tuple = model(__lowercase , decoder_input_ids=__lowercase).logits
__UpperCamelCase :Any = optax.softmax_cross_entropy(__lowercase , onehot(__lowercase , logits.shape[-1])).mean()
__UpperCamelCase :str = -(labels.shape[-1] * loss.item())
__UpperCamelCase :Optional[Any] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 452
| 1
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Optional[Any] = '''encodec'''
def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase__ : Tuple=2_4_0_0_0 , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Any=1_2_8 , lowerCAmelCase__ : Optional[Any]=3_2 , lowerCAmelCase__ : Union[str, Any]=1 , lowerCAmelCase__ : str=[8, 5, 4, 2] , lowerCAmelCase__ : List[Any]="weight_norm" , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Tuple=7 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Dict="reflect" , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : int=1_0_2_4 , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : int=True , **lowerCAmelCase__ : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = target_bandwidths
__SCREAMING_SNAKE_CASE : Optional[int] = sampling_rate
__SCREAMING_SNAKE_CASE : List[Any] = audio_channels
__SCREAMING_SNAKE_CASE : int = normalize
__SCREAMING_SNAKE_CASE : Optional[int] = chunk_length_s
__SCREAMING_SNAKE_CASE : Any = overlap
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = num_filters
__SCREAMING_SNAKE_CASE : Optional[Any] = num_residual_layers
__SCREAMING_SNAKE_CASE : Tuple = upsampling_ratios
__SCREAMING_SNAKE_CASE : str = norm_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = kernel_size
__SCREAMING_SNAKE_CASE : str = last_kernel_size
__SCREAMING_SNAKE_CASE : Any = residual_kernel_size
__SCREAMING_SNAKE_CASE : str = dilation_growth_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = use_causal_conv
__SCREAMING_SNAKE_CASE : Dict = pad_mode
__SCREAMING_SNAKE_CASE : List[Any] = compress
__SCREAMING_SNAKE_CASE : Dict = num_lstm_layers
__SCREAMING_SNAKE_CASE : str = trim_right_ratio
__SCREAMING_SNAKE_CASE : int = codebook_size
__SCREAMING_SNAKE_CASE : List[str] = codebook_dim if codebook_dim is not None else hidden_size
__SCREAMING_SNAKE_CASE : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowerCAmelCase__ )
@property
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
| 578
|
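The three properties above derive everything from the sampling rate: chunk length in samples, chunk stride from the overlap, and frame rate from the product of the upsampling ratios. Worked numerically with illustrative values (`chunk_length_s=1.0` and `overlap=0.01` are assumptions; both default to `None` in the signature):

import math

sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.01
hop_length = 8 * 5 * 4 * 2                                  # prod([8, 5, 4, 2]) = 320
chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 23760 samples
frame_rate = math.ceil(sampling_rate / hop_length)          # 75 frames per second
print(chunk_length, chunk_stride, frame_rate)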
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def __lt__( self : Union[str, Any] , lowerCAmelCase__ : Any ):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : Optional[int] , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
return self[-1] == other[-1]
def lowerCAmelCase_ ( _lowerCamelCase: list ):
__SCREAMING_SNAKE_CASE : list[Stack] = []
# sort into stacks
for element in collection:
__SCREAMING_SNAKE_CASE : str = Stack([element] )
__SCREAMING_SNAKE_CASE : Tuple = bisect_left(_lowerCamelCase , _lowerCamelCase )
if i != len(_lowerCamelCase ):
stacks[i].append(_lowerCamelCase )
else:
stacks.append(_lowerCamelCase )
# use a heap-based merge to combine the stacks efficiently
__SCREAMING_SNAKE_CASE : List[Any] = merge(*(reversed(_lowerCamelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCamelCase__ : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ : Any = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 578
| 1
|
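A de-obfuscated sketch of the patience sort above: each element lands on the leftmost pile whose top is not smaller, so every pile stays non-increasing, and a k-way heap merge of the reversed piles yields the sorted result:

from bisect import bisect_left
from heapq import merge

def patience_sort(collection):
    piles = []  # each pile is non-increasing; its top is the last element
    for element in collection:
        i = bisect_left([pile[-1] for pile in piles], element)
        if i != len(piles):
            piles[i].append(element)
        else:
            piles.append([element])
    # every reversed pile is ascending, so merging them gives the sorted output
    return list(merge(*(reversed(pile) for pile in piles)))

print(patience_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]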
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = set()
# edges = list of graph's edges
lowercase = get_edges(lowerCAmelCase__ )
# While there are still elements in the edges list, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and then
# remove all edges adjacent to from_node and to_node
while edges:
lowercase , lowercase = edges.pop()
chosen_vertices.add(lowerCAmelCase__ )
chosen_vertices.add(lowerCAmelCase__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowerCAmelCase__ )
return chosen_vertices
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 633
|
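A de-obfuscated reading of the matching-based cover above, the classic 2-approximation: pick an arbitrary edge, take both endpoints, and discard every edge touching either of them. A sketch, not the original file:

def matching_min_vertex_cover(graph):
    chosen_vertices = set()
    edges = {(u, v) for u, adj in graph.items() for v in adj}
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        edges = {e for e in edges if from_node not in e and to_node not in e}
    return chosen_vertices

print(matching_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}))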
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = defaultdict(lowerCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase = [key for key, value in counts.items() if value > 1]
lowercase = []
for duplicate_key in duplicates:
lowercase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(lowerCAmelCase__ , key=lambda s: s["title"].lower() )
def UpperCamelCase ( lowerCAmelCase__=False ):
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
lowercase = yaml.safe_load(f.read() )
# Get to the API doc
lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase = content[api_idx]['''sections''']
# Then to the model doc
lowercase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase = api_doc[model_idx]['''sections''']
lowercase = [(idx, section) for idx, section in enumerate(lowerCAmelCase__ ) if '''sections''' in section]
lowercase = False
for idx, modality_doc in modalities_docs:
lowercase = modality_doc['''sections''']
lowercase = clean_model_doc_toc(lowerCAmelCase__ )
if old_modality_doc != new_modality_doc:
lowercase = True
if overwrite:
lowercase = new_modality_doc
if diff:
if overwrite:
lowercase = model_doc
lowercase = api_doc
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowercase__ :Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ :int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 633
| 1
|
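The toctree clean-up above boils down to: count duplicate `local` keys, require each duplicate to carry a single title, keep it once, and sort everything by lower-cased title. A standalone sketch of that core (a simplified reading, without the YAML plumbing):

from collections import defaultdict

def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    new_doc = []
    for local in (k for k, v in counts.items() if v > 1):
        titles = {doc["title"] for doc in model_doc if doc["local"] == local}
        if len(titles) > 1:
            raise ValueError(f"{local} appears with several different titles: {titles}")
        new_doc.append({"local": local, "title": titles.pop()})
    new_doc.extend(doc for doc in model_doc if counts[doc["local"]] == 1)
    return sorted(new_doc, key=lambda s: s["title"].lower())

print(clean_model_doc_toc([{"local": "b", "title": "Zeta"}, {"local": "a", "title": "Alpha"}]))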
'''simple docstring'''
import numpy as np
from PIL import Image
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[str] = np.array(snake_case__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
A : int = 0
A : Any = 0
A : Optional[Any] = 0
A : Any = 0
# compute the shape of the output matrix
A : List[str] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
A : List[Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
A : Tuple = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A : Optional[int] = 0
A : Any = 0
return updated_arr
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : str = np.array(snake_case__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
A : Union[str, Any] = 0
A : List[str] = 0
A : List[str] = 0
A : List[str] = 0
# compute the shape of the output matrix
A : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
A : Tuple = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
A : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A : Any = 0
A : Optional[int] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowercase : Tuple = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 634
|
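A de-obfuscated sketch of the pooling loops above, generalised with a reducer function so the same walk over the square input serves both max and average pooling:

import numpy as np

def pool2d(arr, size, stride, reducer):
    arr = np.asarray(arr)
    out_dim = (arr.shape[0] - size) // stride + 1
    out = np.zeros((out_dim, out_dim))
    for mi, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for mj, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            out[mi, mj] = reducer(arr[i : i + size, j : j + size])
    return out

a = np.arange(16).reshape(4, 4)
print(pool2d(a, size=2, stride=2, reducer=np.max))   # max pooling
print(pool2d(a, size=2, stride=2, reducer=np.mean))  # average pooling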
'''simple docstring'''
import numpy
# List of input, output pairs
lowercase : Any = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowercase : str = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
lowercase : Union[str, Any] = [2, 4, 1, 5]
lowercase : Any = len(train_data)
lowercase : Optional[int] = 0.0_09
def lowerCAmelCase_ ( snake_case__ , snake_case__="train" ):
'''simple docstring'''
return calculate_hypothesis_value(snake_case__ , snake_case__ ) - output(
snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = 0
for i in range(len(snake_case__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase_ ( snake_case__ , snake_case__=m ):
'''simple docstring'''
A : List[Any] = 0
for i in range(snake_case__ ):
if index == -1:
summation_value += _error(snake_case__ )
else:
summation_value += _error(snake_case__ ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[Any] = summation_of_cost_derivative(snake_case__ , snake_case__ ) / m
return cost_derivative_value
def lowerCAmelCase_ ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A : Dict = 0.00_00_02
A : Optional[Any] = 0
A : int = 0
while True:
j += 1
A : List[str] = [0, 0, 0, 0]
for i in range(0 , len(snake_case__ ) ):
A : Union[str, Any] = get_cost_derivative(i - 1 )
A : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
snake_case__ , snake_case__ , atol=snake_case__ , rtol=snake_case__ , ):
break
A : List[Any] = temp_parameter_vector
print(('''Number of iterations:''', j) )
def lowerCAmelCase_ ( ):
'''simple docstring'''
for i in range(len(snake_case__ ) ):
print(('''Actual output value:''', output(snake_case__ , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(snake_case__ , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 634
| 1
|
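A vectorised sketch of the batch gradient descent above, for the linear hypothesis h(x) = w·x + b; the data rows and the 0.009 learning rate come from the script, while a fixed iteration count stands in for its tolerance-based stopping rule:

import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1]], dtype=float)
y = np.array([15, 25, 41, 8], dtype=float)
w, b, lr = np.zeros(3), 0.0, 0.009

for _ in range(10_000):
    err = X @ w + b - y             # hypothesis minus target, shape (m,)
    w -= lr * (X.T @ err) / len(y)  # d(cost)/dw
    b -= lr * err.mean()            # d(cost)/db

print(w, b)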
'''simple docstring'''
lowerCAmelCase_ : List[Any] = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 702
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
snake_case : int = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
snake_case : str = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = AudioClassificationPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
# test with a raw waveform
_UpperCAmelCase : Any = np.zeros((3_4_0_0_0,) )
_UpperCAmelCase : Optional[Any] = np.zeros((1_4_0_0_0,) )
return audio_classifier, [audioa, audio]
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : str = examples
_UpperCAmelCase : Union[str, Any] = audio_classifier(lowerCAmelCase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCAmelCase__ , [
{"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
{"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
] , )
_UpperCAmelCase : int = audio_classifier(lowerCAmelCase__ , top_k=1 )
self.assertEqual(
lowerCAmelCase__ , [
{"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
] , )
self.run_torchaudio(lowerCAmelCase__ )
@require_torchaudio
def snake_case_ (self , lowerCAmelCase__ ):
import datasets
# test with an audio array loaded from a dataset
_UpperCAmelCase : Optional[int] = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_UpperCAmelCase : List[str] = dataset[0]["""audio"""]["""array"""]
_UpperCAmelCase : Union[str, Any] = audio_classifier(lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
{"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
{"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )},
] , )
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_UpperCAmelCase : List[Any] = pipeline("""audio-classification""" , model=lowerCAmelCase__ )
_UpperCAmelCase : str = np.ones((8_0_0_0,) )
_UpperCAmelCase : Tuple = audio_classifier(lowerCAmelCase__ , top_k=4 )
_UpperCAmelCase : List[Any] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_UpperCAmelCase : Any = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_UpperCAmelCase : Any = {"""array""": np.ones((8_0_0_0,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_UpperCAmelCase : List[Any] = audio_classifier(lowerCAmelCase__ , top_k=4 )
self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def snake_case_ (self ):
import datasets
_UpperCAmelCase : int = """superb/wav2vec2-base-superb-ks"""
_UpperCAmelCase : List[Any] = pipeline("""audio-classification""" , model=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_UpperCAmelCase : Union[str, Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_UpperCAmelCase : Optional[int] = audio_classifier(lowerCAmelCase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def snake_case_ (self ):
pass
| 156
| 0
|
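Raw-waveform usage mirroring the test above (the checkpoint name comes from the test itself; the all-ones waveform is just a smoke-test input, and network access is assumed):

import numpy as np
from transformers import pipeline

classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
waveform = np.ones(8_000, dtype=np.float32)
print(classifier(waveform, top_k=4))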
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = 'pegasus'
_snake_case : str = ['past_key_values']
_snake_case : List[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[str] , lowerCAmelCase__ : int=50265 , lowerCAmelCase__ : int=1024 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : Union[str, Any]=4096 , lowerCAmelCase__ : List[Any]=16 , lowerCAmelCase__ : Dict=12 , lowerCAmelCase__ : Optional[Any]=4096 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[Any]=1024 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : int=1 , lowerCAmelCase__ : Tuple=1 , **lowerCAmelCase__ : str , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = encoder_layers
_UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def snake_case__ ( self : Dict ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
return self.d_model
| 98
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a__ ( lowercase : Tuple, lowercase : List[str], lowercase : Optional[int], lowercase : List[str], lowercase : List[str]=True, lowercase : str="pt" ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {'''add_prefix_space''': True} if isinstance(lowercase, lowercase ) and not line.startswith(''' ''' ) else {}
_UpperCamelCase = padding_side
return tokenizer(
[line], max_length=lowercase, padding='''max_length''' if pad_to_max_length else None, truncation=lowercase, return_tensors=lowercase, add_special_tokens=lowercase, **lowercase, )
def a__ ( lowercase : str, lowercase : int, lowercase : Tuple=None, ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = input_ids.ne(lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Line-by-line seq2seq dataset reading `<type_path>.source` / `<type_path>.target` files."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
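# --- Illustrative usage (added by the editor; a sketch, not part of the original utils) ---
# How the `dropout` -> `dropout_rate` remapping above behaves, demonstrated with
# hypothetical namespace objects in place of real hparams/config instances.
from types import SimpleNamespace

if __name__ == "__main__":
    demo_hparams = SimpleNamespace(dropout=0.1, attention_dropout=None)
    demo_config = SimpleNamespace(dropout_rate=0.0)  # T5-style config: no `dropout`, only `dropout_rate`

    demo_hparams, demo_config = set_extra_model_params(["dropout", "attention_dropout"], demo_hparams, demo_config)
    assert demo_config.dropout_rate == 0.1  # value landed on the equivalent T5 parameter
    assert not hasattr(demo_hparams, "dropout")  # consumed params are removed from hparams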
| 98
| 1
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Returns (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Returns the unique x modulo n1*n2 with x = r1 (mod n1) and x = r2 (mod n2),
    for coprime moduli n1 and n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Returns the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built on modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 709
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """Configuration class for GPT-J."""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
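# --- Shape sketch (added by the editor) ---
# A minimal illustration of the dummy past_key_values layout built by
# generate_dummy_inputs above: one (key, value) pair per layer, each of shape
# (batch, n_head, past_sequence_length, head_dim). The numbers below mirror the
# GPT-J defaults and are illustrative assumptions, not a real export.
if __name__ == "__main__":
    batch, seqlen = 2, 8
    n_head, n_embd = 16, 4096
    past_key_values_length = seqlen + 2  # deliberately not the same length as the input
    past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
    assert past_shape == (2, 16, 10, 256)
    # the attention mask is widened to cover past + current tokens:
    assert past_key_values_length + seqlen == 18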
| 193
| 0
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
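# --- Worked example (added by the editor) ---
# For a Hermitian matrix the Rayleigh quotient is real and lies between the
# smallest and largest eigenvalues; at an eigenvector it equals the eigenvalue.
# A minimal sketch using the functions above:
if __name__ == "__main__":
    d = np.array([[2, 0], [0, 5]])
    e = np.array([[1], [0]])  # eigenvector of `d` with eigenvalue 2
    assert is_hermitian(d)
    assert rayleigh_quotient(d, e) == 2.0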
| 662
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 662
| 1
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
                --data_dir {data_dir} \
                --output_dir {output_dir} \
                --model_name_or_path facebook/rag-sequence-base \
                --model_type rag_sequence \
                --do_train \
                --do_predict \
                --n_val -1 \
                --val_check_interval 1.0 \
                --train_batch_size 2 \
                --eval_batch_size 1 \
                --max_source_length 25 \
                --max_target_length 25 \
                --val_max_target_length 25 \
                --test_max_target_length 25 \
                --label_smoothing 0.1 \
                --dropout 0.1 \
                --attention_dropout 0.1 \
                --weight_decay 0.001 \
                --adam_epsilon 1e-08 \
                --max_grad_norm 0.1 \
                --lr_scheduler polynomial \
                --learning_rate 3e-04 \
                --num_train_epochs 1 \
                --warmup_steps 4 \
                --gradient_accumulation_steps 1 \
                --distributed-port 8787 \
                --use_dummy_dataset 1 \
                --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multi_gpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multi_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 119
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 119
| 1
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
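# --- Illustrative usage (added by the editor; a sketch, not part of the script) ---
# How rename_state_dict_key chains the substitution patterns over a TF variable
# name. The sample key below is hypothetical; the replacements are applied in
# the order the patterns appear, e.g. "/" -> ".", "layer_" -> "layers.",
# "kernel" -> "weight", "pegasus" -> "model", then the decoder-specific pairs:
#
#   sample_key = "pegasus/decoder/layer_0/output/dense/kernel"
#   hf_key = rename_state_dict_key(sample_key, DECODER_PATTERNS)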
| 10
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 342
| 0
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCAmelCase = "scheduler_config.json"
class a ( lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : Dict = 2
SCREAMING_SNAKE_CASE : Optional[Any] = 3
SCREAMING_SNAKE_CASE : List[str] = 4
SCREAMING_SNAKE_CASE : Any = 5
@dataclass
class a ( lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
class a :
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = SCHEDULER_CONFIG_NAME
SCREAMING_SNAKE_CASE : Union[str, Any] = ["dtype"]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[Any] = True
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] , snake_case : Optional[int] = None , snake_case : str = None , snake_case : Optional[int]=False , **snake_case : str , ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase , subfolder=_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = cls.from_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase )
if hasattr(_lowerCamelCase , '''create_state''' ) and getattr(_lowerCamelCase , '''has_state''' , _lowerCamelCase ):
__UpperCAmelCase : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Dict = False , **snake_case : Union[str, Any] ) -> str:
self.save_config(save_directory=_lowerCamelCase , push_to_hub=_lowerCamelCase , **_lowerCamelCase )
@property
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
return self._get_compatibles()
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] ) -> int:
__UpperCAmelCase : Tuple = list(set([cls.__name__] + cls._compatibles ) )
__UpperCAmelCase : Optional[int] = importlib.import_module(__name__.split('''.''' )[0] )
__UpperCAmelCase : Any = [
getattr(_lowerCamelCase , _lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase , _lowerCamelCase )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Creates a beta schedule that discretizes the given alpha_bar cosine function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
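# --- Worked example (added by the editor; a sketch under assumed values) ---
# add_noise_common implements the standard DDPM forward process
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# Below, a hand-built state with illustrative beta values; with zero noise,
# x_t is just x_0 scaled by sqrt(alpha_bar_t).
if __name__ == "__main__":
    demo_betas = jnp.linspace(1e-4, 2e-2, 10)
    demo_alphas = 1.0 - demo_betas
    demo_state = CommonSchedulerState(
        alphas=demo_alphas, betas=demo_betas, alphas_cumprod=jnp.cumprod(demo_alphas, axis=0)
    )
    x0 = jnp.ones((2, 3))
    eps = jnp.zeros((2, 3))
    t = jnp.array([0, 9])
    x_t = add_noise_common(demo_state, x0, eps, t)
    assert x_t.shape == x0.shape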
| 712
|
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """
    Counts the numbers below `limit` that are expressible as the sum of a prime
    square, a prime cube, and a prime fourth power (Project Euler style).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 266
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 314
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
lowercase_ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 314
| 1
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Returns the optimal value for the current player (maximizer or minimizer)
    in a full binary game tree whose leaves hold `scores`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
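# --- Worked trace (added by the editor) ---
# For scores [90, 23, 6, 33, 21, 65, 123, 34423] (height = 3, maximizer first):
#   depth 2 (max of leaf pairs): 90, 33, 65, 34423
#   depth 1 (min):               33, 65
#   depth 0 (max):               65   <- the optimal value printed by main()
if __name__ == "__main__":
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65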
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Any = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """Image processor for LeViT-style models: resize, center-crop, rescale, normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
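# --- Shape sketch (added by the editor) ---
# The resize step above scales `shortest_edge` by 256/224 before the final
# 224x224 center crop, mirroring the classic "resize to 256, crop to 224"
# ImageNet recipe. A quick check of the arithmetic:
if __name__ == "__main__":
    shortest_edge = 224
    assert int((256 / 224) * shortest_edge) == 256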
| 75
| 0
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Returns a peak of the bitonic list `lst` in O(log n) time by recursing on
    the half that must contain a peak.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
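# --- Worked example (added by the editor) ---
# The input must be bitonic (strictly increasing then strictly decreasing) for
# the divide-and-conquer above to be valid; a couple of quick checks:
if __name__ == "__main__":
    assert peak([1, 3, 4, 5, 7, 6, 2]) == 7
    assert peak([1, 10, 9, 8, 7, 6]) == 10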
| 403
|
def counting_sort(collection):
    """Stable counting sort for a collection of integers."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
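# --- Worked example (added by the editor) ---
# counting_sort runs in O(n + k) time, where k = max - min + 1, and is stable,
# so equal keys keep their original relative order:
#   counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]
#   counting_sort([-2, -5, -45])   -> [-45, -5, -2]   (negative values are fine)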
| 403
| 1
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Returns the value of the series for the given upper bound n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
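# --- Worked example (added by the editor) ---
# Each term contributes 2 * a * floor((a - 1) / 2); for n = 10 the series is
# 6 + 8 + 20 + 24 + 42 + 48 + 72 + 80 = 300.
if __name__ == "__main__":
    assert solution(10) == 300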
| 714
|
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, implemented on a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
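# --- Illustrative usage (added by the editor) ---
# A minimal round trip through the queue above; enqueue returns `self`, so
# calls can be chained. The rear index wraps around to reuse freed slots.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)
    assert len(queue) == 3 and queue.first() == 1
    assert queue.dequeue() == 1  # front advances, its slot becomes reusable
    queue.enqueue(4)             # wraps around into the freed slot
    assert queue.dequeue() == 2 and queue.dequeue() == 3 and queue.dequeue() == 4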
| 280
| 0
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Computes the Hubble parameter H(z) for an FLRW universe with the given
    relative densities, inferring the curvature term from their sum."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
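# --- Worked example (added by the editor) ---
# At redshift z = 0 every (z + 1) factor is 1, so E(0)^2 reduces to
# radiation + matter + curvature + dark_energy = 1 by construction, and H(0)
# equals the Hubble constant itself. The demo above therefore prints ~68.3.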
| 459
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel.encode, holding the pre-quantization latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE model with an Encoder, a VectorQuantizer bottleneck and a Decoder."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.quant_conv(self.encoder(x))
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly skipped
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
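# Minimal usage sketch: round-trip a random image through a default-config
# VQModel. The 1x3x32x32 input shape is an illustrative assumption; any
# spatial size compatible with the down/up blocks works.
#
#     model = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     latents = model.encode(x).latents             # pre-quantization latents
#     reconstruction = model.decode(latents).sample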
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
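# The data-parallel pattern the tests above keep repeating, in miniature:
#
#     params = replicate(params)                            # copy weights to every device
#     prng_seed = jax.random.split(prng_seed, num_samples)  # one RNG key per device
#     prompt_ids = shard(prompt_ids)                        # split the batch across devices
#     images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images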
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the sample tree used by main(): 1 at the root, 2 and 3 below it,
    and 4 and 5 as the children of 2."""
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    return root


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: node first, then left and right subtrees."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: both subtrees first, then the node itself."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, node, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of one level, scanning children left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of one level, scanning children right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the scan direction on every level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
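# Worked example: for the sample tree from make_tree() (1 at the root, 2 and 3
# below it, 4 and 5 under 2), zigzag(make_tree()) -> [[1], [3, 2], [4, 5]].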
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
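# With this layout, importing the package is cheap: _LazyModule resolves names
# from _import_structure on first attribute access, so the torch, TF, and Flax
# modules are only imported when one of their classes is actually requested.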
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
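# The LocalSGD pattern the script boils down to (names match the training
# loop below; `8` mirrors the default of --local_sgd_steps):
#
#     with LocalSGD(accelerator=accelerator, model=model,
#                   local_sgd_steps=8, enabled=True) as local_sgd:
#         for batch in train_dataloader:
#             ...                # forward / backward / optimizer.step()
#             local_sgd.step()   # syncs parameters across workers every 8 steps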
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval dataloaders for GLUE MRPC, tokenized with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
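# Typical invocations (the script filename is an assumption; the flags are the
# ones defined in main() above):
#
#     accelerate launch local_sgd.py
#     accelerate launch local_sgd.py --mixed_precision fp16 --local_sgd_steps 8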
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
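# Minimal usage sketch (the 300x300 input size is an illustrative assumption):
#
#     from PIL import Image
#     processor = ImageProcessor()
#     batch = processor(Image.new("RGB", (300, 300)))
#     batch["pixel_values"][0].shape  # (3, 224, 224) after resize + center crop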
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
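# Intended pairing of the helpers above during checkpointing (sketch; the
# directory name is an assumption):
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#     ...
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")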